source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
DMD5_fmt_plug.c | /*
* DMD5_fmt.c
*
* DIGEST-MD5 authentication module for Solar Designer's John the Ripper
* Uses Solar Designer's MD5 implementation.
*
* This software is Copyright 2006, regenrecht@o2.pl, and
* Copyright 2011, 2013 magnum, and it is hereby released to the general
* public under the following terms: Redistribution and use in source and
* binary forms, with or without modification, are permitted.
*
* Input format:
* $DIGEST-MD5$ username $ realm $ nonce $ digest_uri $ cnonce $ nc $ qop $ response [ $ authzid ]
*
* Just base64-decode the blob you see when sniffing, to get all data needed for above.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DMD5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "dmd5"
#define FORMAT_NAME "DIGEST-MD5 C/R"
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MD5_HEX_SIZE (2 * BINARY_SIZE)
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(cur_salt)
#define SALT_ALIGN 1
#define DSIZE (128 - sizeof(int))
#define CIPHERTEXT_LENGTH (DSIZE * 4)
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Nibble-to-hex lookup tables, indexed by a whole byte:
 * itoa16_shr_04[b] is the hex digit of the high nibble (b >> 4),
 * itoa16_and_0f[b] is the hex digit of the low nibble (b & 0x0f).
 * The 256-entry tables avoid shifting/masking in the hexify loops. */
static const char itoa16_shr_04[] =
	"0000000000000000"
	"1111111111111111"
	"2222222222222222"
	"3333333333333333"
	"4444444444444444"
	"5555555555555555"
	"6666666666666666"
	"7777777777777777"
	"8888888888888888"
	"9999999999999999"
	"aaaaaaaaaaaaaaaa"
	"bbbbbbbbbbbbbbbb"
	"cccccccccccccccc"
	"dddddddddddddddd"
	"eeeeeeeeeeeeeeee"
	"ffffffffffffffff";
static const char itoa16_and_0f[] =
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef";
/* Pre-assembled salt: the three constant byte runs that surround the
 * candidate password in the DIGEST-MD5 computation (see crypt_all()). */
static struct {
	unsigned char login_id[DSIZE];    // username:realm
	unsigned int login_id_len;
	unsigned char nonces[DSIZE];      // :nonce:cnonce[:authzid]
	unsigned int nonces_len;
	unsigned char prehash_KD[DSIZE];  // :nonce:nc:cnonce:qop:hex_A2_hash
	unsigned int prehash_KD_len;
} cur_salt;

/* Per-candidate output digests and plaintexts; allocated in init(). */
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];

/* Self-test vector (exercises the auth-int qop path). */
static struct fmt_tests tests[] = {
	{"$DIGEST-MD5$s3443$pjwstk$00$ldap/10.253.34.43$0734d94ad9abd5bd7fc5e7e77bcf49a8$00000001$auth-int$dd98347e6da3efd6c4ff2263a729ef77", "test"},
	{NULL}
};
/* One-time format setup: scale the key-buffer counts for OpenMP and
 * allocate the per-candidate key/digest arrays. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}
/* Release the buffers allocated by init(). */
static void done(void)
{
	/* free in reverse order of allocation */
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/*
 * Validate one ciphertext line of the form
 *   $DIGEST-MD5$user$realm$nonce$digest_uri$cnonce$nc$qop$response[$authzid]
 * The per-field length bounds mirror the buffer sizes used by get_salt().
 * Returns 1 when parseable, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *data = ciphertext + 12;

	if (strncmp(ciphertext, "$DIGEST-MD5$", 12) != 0)
		return 0;
	if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
		return 0;
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 64) // username
		return 0;
	data = p + 1; // realm
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
		return 0;
	data = p + 1; // nonce
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
		return 0;
	data = p + 1; // digest_uri
	if (!(p = strchr(data, '$')) || (int)(p-data) >= DSIZE)
		return 0;
	data = p + 1; // cnonce
	if (!(p = strchr(data, '$')) || (int)(p-data) > MD5_HEX_SIZE)
		return 0;
	/* cnonce must be entirely hex digits */
	if ( abs(hexlenl(data)) != p-data)
		return 0;
	data = p + 1; // nc
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
		return 0;
	data = p + 1; // qop
	/* NOTE(review): these are prefix tests, so e.g. "authx" passes here
	 * even though get_salt() strcmp()s the exact RFC 2831 values. */
	if (strncmp(data, "auth", 4) && strncmp(data, "auth-int", 8) &&
	    strncmp(data, "auth-conf", 9))
		return 0;
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
		return 0;
	data = p + 1; // authzid, optional
	if ((p = strchr(data, '$'))) {
		/* response followed by an authzid field */
		if ((int)(p-data) > MD5_HEX_SIZE || strlen(&p[1]) >= 8)
			return 0;
	} else if (strlen(data) > MD5_HEX_SIZE)
		return 0;
	/* response must be all hex */
	if (hexlenl(data) !=strlen(data))
		return 0;
	return 1;
}
/* Decode the hex response (the 9th '$'-separated field) into 16 raw
 * bytes.  Returns a pointer to a static buffer. */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_32 out[BINARY_SIZE/4];
	char response[MD5_HEX_SIZE + 1];
	unsigned char *dst = (unsigned char*)out;
	char *field = ciphertext + 12;
	char *end;
	unsigned int i;
	int skip;

	/* hop over the first seven fields to land on the response */
	for (skip = 0; skip < 7; skip++)
		field = strchr(field, '$') + 1;
	end = strchr(field, '$'); /* optional authzid may follow */
	if (end && (end - field + 1) < sizeof(response))
		strnzcpy(response, field, end - field + 1);
	else
		strnzcpy(response, field, sizeof(response));
	for (i = 0; i < BINARY_SIZE; ++i)
		dst[i] = (atoi16[ARCH_INDEX(response[2*i])] << 4) |
		          atoi16[ARCH_INDEX(response[2*i + 1])];
	return (void*)out;
}
/*
 * Parse a ciphertext and pre-assemble the three constant byte runs that
 * crypt_all() concatenates around each candidate password:
 *   login_id   = "user:realm:"
 *   nonces     = ":nonce:cnonce[:authzid]"
 *   prehash_KD = ":nonce:nc:cnonce:qop:hex(MD5(A2))"
 * valid() has already bounded every field, so the strchr() calls here
 * are expected to succeed.  Returns a pointer to the static cur_salt.
 */
static void *get_salt(char *ciphertext)
{
	char username[64];
	char realm[64];
	char nonce[64];
	char digest_uri[DSIZE];
	char cnonce[MD5_HEX_SIZE + 1];
	char nc[9];
	char qop[9];
	char authzid[8];
	unsigned char *ptr_src, *ptr_dst, v, i;
	char *ccopy = strdup(ciphertext);
	char *p, *data = ccopy + 12;
	MD5_CTX ctx;
	char A2[DSIZE];
	unsigned char hash[BINARY_SIZE];
	unsigned char hex_hash[2*MD5_HEX_SIZE];

	/* split the private copy in place, field by field */
	if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(username, data, sizeof(username));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(realm, data, sizeof(realm));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nonce, data, sizeof(nonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(digest_uri, data, sizeof(digest_uri));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(cnonce, data, sizeof(cnonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nc, data, sizeof(nc));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(qop, data, sizeof(qop));
	data = p + 1;
	if ((p = strchr(data, '$'))) {
		/* an optional authzid follows the response field */
		*p = 0;
		data = p + 1;
		if (*data)
			strnzcpy(authzid, data, sizeof(authzid));
		else
			*authzid = 0;
	} else {
		*authzid = 0;
	}

	/* Build A2 per RFC 2831.  valid() only prefix-checks qop, so an
	 * exotic value (e.g. "authx") can match neither strcmp() below;
	 * keep A2 a defined (empty) string in that case instead of hashing
	 * uninitialized stack memory. */
	A2[0] = 0;
	if (!strcmp(qop, "auth"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s", digest_uri);
	else if (!strcmp(qop, "auth-int") || !strcmp(qop, "auth-conf"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s:00000000000000000000000000000000",
		         digest_uri);

	/* hash A2 and hexify the digest */
	MD5_Init(&ctx);
	MD5_Update(&ctx, A2, strlen((char*)A2));
	MD5_Final(hash, &ctx);

	ptr_src = hash;
	ptr_dst = hex_hash;
	for (i = 0; i < BINARY_SIZE; ++i) {
		v = *ptr_src++;
		*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
		*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
	}
	*ptr_dst = 0;

	snprintf((char*)cur_salt.prehash_KD, sizeof(cur_salt.prehash_KD),
	         ":%s:%s:%s:%s:%s", nonce, nc, cnonce, qop, hex_hash);
	cur_salt.prehash_KD_len = strlen((char*)cur_salt.prehash_KD);

	if (authzid[0])
		snprintf((char*)cur_salt.nonces, sizeof(cur_salt.nonces),
		         ":%s:%s:%s", nonce, cnonce, authzid);
	else
		snprintf((char*)cur_salt.nonces, sizeof(cur_salt.nonces),
		         ":%s:%s", nonce, cnonce);
	cur_salt.nonces_len = strlen((char*)cur_salt.nonces);

	snprintf((char*)cur_salt.login_id, sizeof(cur_salt.login_id),
	         "%s:%s:", username, realm);
	cur_salt.login_id_len = strlen((char*)cur_salt.login_id);

	MEM_FREE(ccopy);

	return (void*)&cur_salt;
}
/* Install the salt produced by get_salt() as the active salt. */
static void set_salt(void *salt)
{
	memcpy(&cur_salt, salt, sizeof(cur_salt));
}

/* Store a candidate password (NUL-terminated, truncated to
 * PLAINTEXT_LENGTH). */
static void set_key(char *key, int index)
{
	strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

/* Return the stored candidate for the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Compute the DIGEST-MD5 response for every queued candidate:
 *   A1 = MD5( MD5(user:realm:pass) ":nonce:cnonce[:authzid]" )
 *   KD = MD5( hex(A1) ":nonce:nc:cnonce:qop:hex(MD5(A2))" )
 * The pieces around the password were pre-assembled in cur_salt.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	/* Without OpenMP the for-line above is compiled out and this block
	 * runs exactly once with index == 0 (MAX_KEYS_PER_CRYPT is 1). */
	{
		unsigned char hash[16];
		unsigned char hex_hash[MD5_HEX_SIZE];
		unsigned char *ptr_src, *ptr_dst;
		MD5_CTX ctx;
		int i;

		MD5_Init(&ctx);
		// "username:realm"
		MD5_Update(&ctx, cur_salt.login_id, cur_salt.login_id_len);
		// "password"
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(hash, &ctx);

		MD5_Init(&ctx);
		// previous result
		MD5_Update(&ctx, hash, BINARY_SIZE);
		// ":nonce:cnonce[:authzid]"
		MD5_Update(&ctx, cur_salt.nonces, cur_salt.nonces_len);
		MD5_Final(hash, &ctx);

		// hexify via the 256-entry nibble tables
		ptr_src = hash;
		ptr_dst = hex_hash;
		for (i = 0; i < BINARY_SIZE; ++i) {
			unsigned char v = *ptr_src++;
			*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
			*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
		}

		MD5_Init(&ctx);
		// previous result, in hex
		MD5_Update(&ctx, hex_hash, MD5_HEX_SIZE);
		// ":nonce:nc:cnonce:qop:hex_A2_hash
		MD5_Update(&ctx, cur_salt.prehash_KD, cur_salt.prehash_KD_len);
		MD5_Final((unsigned char*)crypt_key[index], &ctx);
	}
	return count;
}
/* Quick reject: compare only the first 32-bit word of the binary against
 * every computed digest. */
static int cmp_all(void *binary, int count)
{
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	int index;
	ARCH_WORD_32 b = ((ARCH_WORD_32*)binary)[0];

	for (index = 0; index < count; index++)
		if (crypt_key[index][0] == b)
			return 1;
	return 0;
#else
	/* single-candidate build: direct comparison */
	return ((ARCH_WORD_32*)binary)[0] == crypt_key[0][0];
#endif
}

/* Full 16-byte comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}

/* cmp_one() already compared the whole digest; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Cracker hash-table helpers: low bits of the first digest word. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
/* Format descriptor registered with the John the Ripper core: the params
 * block first, then the method table wiring up the callbacks above. */
struct fmt_main fmt_DMD5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
optimized.h | #include <structmember.h>
#include <omp.h>
#include <math.h>
/* For a given class cls and an attribute attr, defines a variable
   attr_offset containing the offset of that attribute in the class's
   __slots__ data structure.  (The descriptor is looked up once so the
   hot per-connection-field loops below can use raw pointer arithmetic.) */
#define DECLARE_SLOT_OFFSET(attr,cls) \
  PyMemberDescrObject *attr ## _descr = (PyMemberDescrObject *)PyObject_GetAttrString(cls,#attr); \
  Py_ssize_t attr ## _offset = attr ## _descr->d_member->offset; \
  Py_DECREF(attr ## _descr)

/* After a previous declaration of DECLARE_SLOT_OFFSET, for an
   instance obj of that class and the given attr, retrieves the value
   of that attribute from its slot.  Declares both attr_obj (the array
   object) and attr (a typed pointer to its raw data). */
#define LOOKUP_FROM_SLOT_OFFSET(type,attr,obj) \
  PyArrayObject *attr ## _obj = *((PyArrayObject **)((char *)obj + attr ## _offset)); \
  type *attr = (type *)(attr ## _obj->data)

/* LOOKUP_FROM_SLOT_OFFSET without declaring data variable */
#define LOOKUP_FROM_SLOT_OFFSET_UNDECL_DATA(type,attr,obj) \
  PyArrayObject *attr ## _obj = *((PyArrayObject **)((char *)obj + attr ## _offset));
/* Same as LOOKUP_FROM_SLOT_OFFSET but ensures the array data is
   contiguous, creating a contiguous temporary copy when it is not
   (e.g. for noncontiguous views).  Must call DECREF_CONTIGUOUS_ARRAY(attr)
   afterwards to release the temporary.
   Does PyArray_FLOAT need to be an argument for this to work with doubles?

   Fix: the contiguity test previously hard-coded 'weights_obj', so the
   macro only worked when attr was 'weights' (as the old CEBALERT noted);
   it now tests the array actually being looked up.
   NOTE(review): PyArray_GETCONTIGUOUS may be a simpler alternative. */
#define CONTIGUOUS_ARRAY_FROM_SLOT_OFFSET(type,attr,obj) \
  PyArrayObject *attr ## _obj = *((PyArrayObject **)((char *)obj + attr ## _offset)); \
  type *attr = 0; \
  PyArrayObject * attr ## _array = 0; \
  if(PyArray_ISCONTIGUOUS(attr ## _obj)) \
    attr = (type *)(attr ## _obj->data); \
  else { \
    attr ## _array = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)attr ## _obj,PyArray_FLOAT,2,2); \
    attr = (type *) attr ## _array->data; \
  }
#define DECREF_CONTIGUOUS_ARRAY(attr) \
if(attr ## _array != 0) { \
Py_DECREF(attr ## _array); }
#define UNPACK_FOUR_TUPLE(type,i1,i2,i3,i4,tuple) \
type i1 = *tuple++; \
type i2 = *tuple++; \
type i3 = *tuple++; \
type i4 = *tuple
#define MASK_THRESHOLD 0.5
#define SUM_NORM_TOTAL(cf,weights,_norm_total,rr1,rr2,cc1,cc2) \
LOOKUP_FROM_SLOT_OFFSET(float,mask,cf); \
double total = 0.0; \
float* weights_init = weights; \
int i, j; \
for (i=rr1; i<rr2; ++i) { \
for (j=cc1; j<cc2; ++j) { \
if (*(mask++) >= MASK_THRESHOLD) { \
total += fabs(*weights_init); \
} \
++weights_init; \
} \
} \
_norm_total[0] = total
/* Classic min/max macros.  Arguments are fully parenthesized so that
 * lower-precedence argument expressions (e.g. a & b) compare correctly;
 * beware each argument may still be evaluated twice. */
#define min(x,y) ((x)<(y)?(x):(y))
#define max(x,y) ((x)>(y)?(x):(y))
/* Response function: for every connection field r with a nonzero sheet
 * mask, temp_act[r] = strength * dot(weights_r, X restricted to r's
 * input slice); masked-out units get 0.  Weights are read through their
 * byte strides, so noncontiguous (view) arrays work. */
void dot_product(double mask[], double X[], double strength, int icols,
                 double temp_act[], PyObject* cfs, int num_cfs, PyObject* cf_type) {
    DECLARE_SLOT_OFFSET(weights,cf_type);
    DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
    int r, i, j;
    #pragma omp parallel for schedule(guided, 8)
    for (r=0; r<num_cfs; ++r) {
        if(mask[r] == 0.0) {
            temp_act[r] = 0;
        } else {
            PyObject *cf = PyList_GetItem(cfs,r);
            LOOKUP_FROM_SLOT_OFFSET_UNDECL_DATA(float,weights,cf);
            /* raw byte strides: handles noncontiguous weight arrays */
            char *data = weights_obj->data;
            int s0 = weights_obj->strides[0];
            int s1 = weights_obj->strides[1];
            LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
            UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
            double tot = 0.0;
            double *xj = X+icols*rr1+cc1;
            // computes the dot product
            for (i=rr1; i<rr2; ++i) {
                double *xi = xj;
                for (j=cc1; j<cc2; ++j) {
                    tot += *((float *)(data + (i-rr1)*s0 + (j-cc1)*s1)) * *xi;
                    ++xi;
                }
                xj += icols;
            }
            temp_act[r] = tot*strength;
        }
    }
}
/* Response function: per connection field, the Euclidean distance between
 * its weights and the matching input patch, inverted and scaled so closer
 * matches respond more: temp_act[r] = strength * (max_dist - dist_r).
 * NOTE(review): serial loop — max_dist is a cross-iteration reduction and
 * this path uses refcounted attribute lookups; presumably why no omp. */
void euclidean_response(double input_activity[], double strength, int icols,
                        double temp_act[], PyObject* cfs, int num_cfs) {
    double *tact = temp_act;
    double max_dist=0.0;
    int r;
    for (r=0; r<num_cfs; ++r) {
        PyObject *cf = PyList_GetItem(cfs,r);
        PyObject *weights_obj = PyObject_GetAttrString(cf,"weights");
        PyObject *slice_obj = PyObject_GetAttrString(cf,"input_sheet_slice");
        float *wj = (float *)(((PyArrayObject*)weights_obj)->data);
        int *slice = (int *)(((PyArrayObject*)slice_obj)->data);
        int rr1 = *slice++;
        int rr2 = *slice++;
        int cc1 = *slice++;
        int cc2 = *slice;
        double *xj = input_activity+icols*rr1+cc1;
        int i, j;
        // accumulate squared differences between weights and input
        double tot = 0.0;
        for (i=rr1; i<rr2; ++i) {
            double *xi = xj;
            float *wi = wj;
            for (j=cc1; j<cc2; ++j) {
                double diff = *wi - *xi;
                tot += diff*diff;
                ++wi;
                ++xi;
            }
            xj += icols;
            wj += cc2-cc1;
        }
        double euclidean_distance = sqrt(tot);
        if (euclidean_distance>max_dist)
            max_dist = euclidean_distance;
        *tact = euclidean_distance;
        ++tact;
        // Anything obtained with PyObject_GetAttrString must be explicitly freed
        Py_DECREF(weights_obj);
        Py_DECREF(slice_obj);
    }
    /* second pass: invert distances relative to the maximum */
    tact = temp_act;
    for (r=0; r<num_cfs; ++r) {
        *tact = strength*(max_dist - *tact);
        ++tact;
    }
}
/* Learning Functions including simple Hebbian, BCM etc. */

/* Hebbian learning: for every active, unmasked unit r, each unmasked
 * weight gets w += lr * out_r * input; the CF's resulting L1 weight norm
 * is cached in _norm_total for a later normalization step. */
void hebbian(double input_activity[], double output_activity[],
             double sheet_mask[], const int num_cfs, const int icols,
             PyObject* cfs, double single_connection_learning_rate, PyObject* cf_type) {
    DECLARE_SLOT_OFFSET(weights,cf_type);
    DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
    DECLARE_SLOT_OFFSET(mask,cf_type);
    DECLARE_SLOT_OFFSET(_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);
    int r;
    #pragma omp parallel for schedule(guided, 8)
    for (r=0; r<num_cfs; ++r) {
        double load = output_activity[r];
        if (load != 0 && sheet_mask[r] != 0) {
            load *= single_connection_learning_rate;
            PyObject *cf = PyList_GetItem(cfs,r);
            LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
            LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
            LOOKUP_FROM_SLOT_OFFSET(float,mask,cf);
            UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
            double total = 0.0;
            // modify non-masked weights
            double *inpj = input_activity+icols*rr1+cc1;
            int i, j;
            for (i=rr1; i<rr2; ++i) {
                double *inpi = inpj;
                for (j=cc1; j<cc2; ++j) {
                    // The mask is floating point, so we have to
                    // use a robust comparison instead of testing
                    // against exactly 0.0.
                    if (*(mask++) >= MASK_THRESHOLD) {
                        *weights += load * *inpi;
                        total += fabs(*weights);
                    }
                    ++weights;
                    ++inpi;
                }
                inpj += icols;
            }
            // store the sum of the cf's weights
            LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
            _norm_total[0]=total;
            LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
            _has_norm_total[0]=1;
        }
    }
}
/* BCM learning with a fixed threshold: unmasked weights get
 * w += lr * out_r * input * (out_r - unit_threshold), clamped at zero;
 * the CF's L1 weight norm is cached in _norm_total. */
void bcm_fixed(double input_activity[], double output_activity[], int num_cfs,
               int icols, PyObject* cfs, double single_connection_learning_rate,
               double unit_threshold, PyObject* cf_type) {
    DECLARE_SLOT_OFFSET(weights,cf_type);
    DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
    DECLARE_SLOT_OFFSET(mask,cf_type);
    DECLARE_SLOT_OFFSET(_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);
    int r;
    #pragma omp parallel for schedule(guided, 8)
    for (r=0; r<num_cfs; ++r) {
        double load = output_activity[r];
        double unit_activity= load;  /* kept pre-scaling for the BCM term */
        if (load != 0) {
            load *= single_connection_learning_rate;
            PyObject *cf = PyList_GetItem(cfs,r);
            LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
            LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
            LOOKUP_FROM_SLOT_OFFSET(float,mask,cf);
            UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
            double total = 0.0;
            int i, j;
            // modify non-masked weights
            double *inpj = input_activity+icols*rr1+cc1;
            for (i=rr1; i<rr2; ++i) {
                double *inpi = inpj;
                for (j=cc1; j<cc2; ++j) {
                    // The mask is floating point, so we have to
                    // use a robust comparison instead of testing
                    // against exactly 0.0.
                    if (*(mask++) >= MASK_THRESHOLD) {
                        *weights += load * *inpi * (unit_activity - unit_threshold);
                        if (*weights<0) { *weights = 0;}  /* no negative weights */
                        total += fabs(*weights);
                    }
                    ++weights;
                    ++inpi;
                }
                inpj += icols;
            }
            // store the sum of the cf's weights
            LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
            _norm_total[0]=total;
            LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
            _has_norm_total[0]=1;
        }
    }
}
/* Hebbian-style update driven by an activity trace instead of the
 * instantaneous output: w += lr * trace_r * input on unmasked weights;
 * the CF's L1 weight norm is cached in _norm_total. */
void trace_learning(double input_activity[], double traces[], int num_cfs,
                    int icols, PyObject* cfs, double single_connection_learning_rate,
                    PyObject* cf_type) {
    DECLARE_SLOT_OFFSET(weights,cf_type);
    DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
    DECLARE_SLOT_OFFSET(mask,cf_type);
    DECLARE_SLOT_OFFSET(_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);
    int r;
    #pragma omp parallel for schedule(guided, 8)
    for (r=0; r<num_cfs; ++r) {
        double load = traces[r];
        if (load != 0) {
            load *= single_connection_learning_rate;
            PyObject *cf = PyList_GetItem(cfs,r);
            LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
            LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
            LOOKUP_FROM_SLOT_OFFSET(float,mask,cf);
            UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
            double total = 0.0;
            int i, j;
            // modify non-masked weights
            double *inpj = input_activity+icols*rr1+cc1;
            for (i=rr1; i<rr2; ++i) {
                double *inpi = inpj;
                for (j=cc1; j<cc2; ++j) {
                    // The mask is floating point, so we have to
                    // use a robust comparison instead of testing
                    // against exactly 0.0.
                    if (*(mask++) >= MASK_THRESHOLD) {
                        *weights += load * *inpi;
                        total += fabs(*weights);
                    }
                    ++weights;
                    ++inpi;
                }
                inpj += icols;
            }
            // store the sum of the cf's weights
            LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
            _norm_total[0]=total;
            LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
            _has_norm_total[0]=1;
        }
    }
}
/* Divide each active, unmasked CF's weights by its L1 norm (recomputing
 * the norm when the cached value is stale), then mark the cache stale.
 * Note the scaling loop covers all rc weights in the bounding box —
 * masked entries too — but only unmasked ones contribute to the norm.
 * NOTE(review): no guard against _norm_total[0] == 0 (division by zero);
 * presumably norms are always positive in practice — confirm. */
void divisive_normalize_l1(double sheet_mask[], double active_units_mask[],
                           PyObject* cfs, PyObject* cf_type, int num_cfs) {
    DECLARE_SLOT_OFFSET(weights,cf_type);
    DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
    DECLARE_SLOT_OFFSET(_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(mask,cf_type);
    int r;
    #pragma omp parallel for schedule(guided, 8)
    for (r=0; r<num_cfs; ++r) {
        if (active_units_mask[r] != 0 && sheet_mask[r] != 0) {
            PyObject *cf = PyList_GetItem(cfs,r);
            LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
            LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
            LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
            LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
            UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
            // if normalized total is not available, sum the weights
            if (_has_norm_total[0] == 0) {
                SUM_NORM_TOTAL(cf,weights,_norm_total,rr1,rr2,cc1,cc2);
            }
            // normalize the weights
            double factor = 1.0/_norm_total[0];
            int rc = (rr2-rr1)*(cc2-cc1);
            int i;
            for (i=0; i<rc; ++i) {
                *(weights++) *= factor;
            }
            // Indicate that norm_total is stale
            _has_norm_total[0]=0;
        }
    }
}
/* For every active, unmasked unit r, sum _norm_total across all 'length'
 * projections in projlist (recomputing stale entries) and write the joint
 * total back into each projection's cf, marking it valid.  Serial loop:
 * it is built on refcounted Python C-API calls. */
void compute_joint_norm_totals(PyObject* projlist, double active_units_mask[],
                               double sheet_mask[], int num_cfs, int length,
                               PyObject* cf_type) {
    DECLARE_SLOT_OFFSET(_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);
    DECLARE_SLOT_OFFSET(weights,cf_type);
    DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
    DECLARE_SLOT_OFFSET(mask,cf_type);
    double *x = active_units_mask;
    double *m = sheet_mask;
    int r, p;
    for (r=0; r<num_cfs; ++r) {
        double load = *x++;
        double msk = *m++;
        if (msk!=0 && load != 0) {
            double nt = 0;
            /* first pass: accumulate each projection's norm total */
            for(p=0; p<length; p++) {
                PyObject *proj = PyList_GetItem(projlist,p);
                PyObject *cfs = PyObject_GetAttrString(proj,"flatcfs");
                PyObject *cf = PyList_GetItem(cfs,r);
                LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
                LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
                if (_has_norm_total[0] == 0) {
                    LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
                    LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
                    UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
                    SUM_NORM_TOTAL(cf,weights,_norm_total,rr1,rr2,cc1,cc2);
                }
                nt += _norm_total[0];
                /* GetAttrString returned a new reference */
                Py_DECREF(cfs);
            }
            /* second pass: write the joint total back everywhere */
            for(p=0; p<length; p++) {
                PyObject *proj = PyList_GetItem(projlist,p);
                PyObject *cfs = PyObject_GetAttrString(proj,"flatcfs");
                PyObject *cf = PyList_GetItem(cfs,r);
                LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
                _norm_total[0] = nt;
                LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
                _has_norm_total[0] = 1;
                Py_DECREF(cfs);
            }
        }
    }
}
|
mpi_io.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "../splatt_mpi.h"
#include "../io.h"
#include "../timer.h"
#include "../util.h"
#include "../ccp/ccp.h"
/******************************************************************************
* API FUNCTONS
*****************************************************************************/
/* Public API stub: queries the communicator and reports success without
 * loading anything.  NOTE(review): 'tt', 'rank' and 'npes' are set but
 * never used — presumably a placeholder awaiting the actual CSF-loading
 * implementation; confirm before relying on this entry point. */
int splatt_mpi_csf_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_csf ** tensors,
    double const * const options,
    MPI_Comm comm)
{
  sptensor_t * tt = NULL;

  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  return SPLATT_SUCCESS;
}
/* Distribute a coordinate-format tensor file over 'comm' and hand the
 * value/index arrays of the local portion to the caller.  Ownership of
 * tt->vals and each tt->ind[m] transfers out, which is why only the
 * struct shell is released with free() (not a deep tensor free).
 * NOTE(review): the tt->ind pointer array itself is not released here —
 * confirm against the tensor allocator whether that is a small leak.
 * Returns SPLATT_SUCCESS, or SPLATT_ERROR_BADINPUT with zeroed outputs. */
int splatt_mpi_coord_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_idx_t * nnz,
    splatt_idx_t *** inds,
    splatt_val_t ** vals,
    double const * const options,
    MPI_Comm comm)
{
  sptensor_t * tt = mpi_simple_distribute(fname, comm);

  if(tt == NULL) {
    /* signal empty output on failure */
    *nmodes = 0;
    *nnz = 0;
    *vals = NULL;
    *inds = NULL;
    return SPLATT_ERROR_BADINPUT;
  }

  *nmodes = tt->nmodes;
  *nnz = tt->nnz;

  /* copy to output */
  *vals = tt->vals;
  *inds = splatt_malloc(tt->nmodes * sizeof(**inds));
  for(idx_t m=0; m < tt->nmodes; ++m) {
    (*inds)[m] = tt->ind[m];
  }

  /* shallow free: the arrays now belong to the caller */
  free(tt);

  return SPLATT_SUCCESS;
}
/******************************************************************************
* PRIVATE FUNCTONS
*****************************************************************************/
/**
 * @brief Fill buf with the next 'nnz_to_read' tensor values.
 *
 * @param fin The file to read from.
 * @param buf The sptensor buffer to fill.
 * @param offset Per-mode index offset subtracted from each read index
 *               (e.g. 1 for 1-indexed input files).
 * @param nnz_to_read The number of nonzeros to read.
 */
static void p_fill_tt_nnz(
    FILE * fin,
    sptensor_t * const buf,
    idx_t const * const offset,
    idx_t const nnz_to_read)
{
  idx_t const nmodes = buf->nmodes;

  char * ptr = NULL;
  char * line = NULL;
  ssize_t read;
  size_t len = 0;

  idx_t nnzread = 0;
  while(nnzread < nnz_to_read && (read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      ptr = line;
      for(idx_t m=0; m < nmodes; ++m) {
        idx_t ind = strtoull(ptr, &ptr, 10);
        buf->ind[m][nnzread] = ind - offset[m];
      }
      val_t const v = strtod(ptr, &ptr);
      buf->vals[nnzread++] = v;
    }
  }

  /* getline() allocates the line buffer; previously this leaked on every
   * call (the sibling readers free it) */
  free(line);
}
/**
 * @brief Read a one-int-per-nonzero partition file on the root rank and
 *        scatter target_nnz-sized chunks to every other rank; the root
 *        keeps its own (possibly larger) chunk.
 *
 * @param ttbuf My buffered nonzeros.
 * @param pfname The partition file name.
 * @param rinfo MPI rank information.
 *
 * @return My nonzeros' part ids; caller frees.
 */
static int * p_distribute_parts(
    sptensor_t * const ttbuf,
    char const * const pfname,
    rank_info * const rinfo)
{
  /* root may have more than target_nnz */
  idx_t const target_nnz = rinfo->global_nnz / rinfo->npes;
  int * parts = (int *) splatt_malloc(SS_MAX(ttbuf->nnz, target_nnz) * sizeof(int));

  if(rinfo->rank == 0) {
    int ret;
    FILE * fin = open_f(pfname, "r");

    /* send to all other ranks */
    for(int p=1; p < rinfo->npes; ++p) {
      /* read into buffer */
      for(idx_t n=0; n < target_nnz; ++n) {
        /* fscanf returns EOF (not 0) at end of file, so test for one
         * successful conversion — '== 0' missed a short file and left
         * parts[n] uninitialized */
        if((ret = fscanf(fin, "%d", &(parts[n]))) != 1) {
          fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n",
              pfname);
          exit(1);
        }
      }
      MPI_Send(parts, target_nnz, MPI_INT, p, 0, rinfo->comm_3d);
    }

    /* now read my own part info */
    for(idx_t n=0; n < ttbuf->nnz; ++n) {
      if((ret = fscanf(fin, "%d", &(parts[n]))) != 1) {
        fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n",
            pfname);
        exit(1);
      }
    }
    fclose(fin);
  } else {
    /* receive my part info from the root */
    MPI_Recv(parts, ttbuf->nnz, MPI_INT, 0, 0, rinfo->comm_3d,
        &(rinfo->status));
  }
  return parts;
}
/* Compute a 1D (slice-wise) partition of every mode: walk the per-slice
 * nonzero counts and cut whenever the running count passes the per-rank
 * target, snapping each cut to whichever neighboring slice is closer.
 * Fills rinfo->mat_start/mat_end and resets layer_starts/ends to span
 * the full dimension. */
static void p_find_my_slices_1d(
    idx_t ** const ssizes,
    idx_t const nmodes,
    idx_t const nnz,
    rank_info * const rinfo)
{
  idx_t const * const dims = rinfo->global_dims;

  /* find start/end slices for my partition */
  for(idx_t m=0; m < nmodes; ++m) {
    /* current processor */
    int currp = 0;
    idx_t lastn = 0;            /* nnz count at the previous cut */
    idx_t nnzcnt = 0;
    idx_t pnnz = nnz / rinfo->npes;  /* target nnz per rank */

    rinfo->layer_starts[m] = 0;
    rinfo->layer_ends[m] = dims[m];
    rinfo->mat_start[m] = 0;
    rinfo->mat_end[m] = dims[m];
    for(idx_t s=0; s < dims[m]; ++s) {
      if(nnzcnt >= lastn + pnnz) {
        /* choose this slice or the previous, whichever is closer */
        if(s > 0) {
          idx_t const thisdist = nnzcnt - (lastn + pnnz);
          idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]);
          if(prevdist < thisdist) {
            lastn = nnzcnt - ssizes[m][s-1];
          } else {
            lastn = nnzcnt;
          }
        } else {
          lastn = nnzcnt;
        }

        ++currp;
        /* adjust target nnz based on what is left */
        pnnz = (nnz - lastn) / SS_MAX(1, rinfo->npes - currp);
        if(currp == rinfo->rank) {
          rinfo->mat_start[m] = s;
        } else if(currp == rinfo->rank+1 && currp != rinfo->npes) {
          /* only set mat_end if we aren't at the end of the tensor */
          rinfo->mat_end[m] = s;
          break;
        }
      }
      nnzcnt += ssizes[m][s];

      /* the last rank never shrinks mat_end, so it must span to dims[m] */
      if(rinfo->rank == rinfo->npes-1) {
        assert(rinfo->mat_end[m] == rinfo->global_dims[m]);
      }
    }

    /* it is possible to have a very small dimension and too many ranks */
    if(rinfo->npes > 1 && rinfo->mat_start[m] == 0
        && rinfo->mat_end[m] == dims[m]) {
      fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\
          SPLATT_PF_IDX".\n", rinfo->rank, m+1);
      rinfo->mat_start[m] = dims[m];
      rinfo->mat_end[m] = dims[m];
    }
  }
}
/**
 * @brief Count the nonzeros of X that fall inside my 1D partition.
 *
 * A nonzero is mine when any of its (1-indexed in the file) indices lies
 * within my [sstarts[m], sends[m]) slice range for some mode.
 *
 * @param fname The name of the file containing X.
 * @param nmodes The number of modes of X.
 * @param sstarts My starting slice per mode, inclusive.
 * @param sends My ending slice per mode, exclusive.
 *
 * @return The number of nonzeros I own.
 */
static idx_t p_count_my_nnz_1d(
    char const * const fname,
    idx_t const nmodes,
    idx_t const * const sstarts,
    idx_t const * const sends)
{
  FILE * fin = open_f(fname, "r");

  char * line = NULL;
  size_t len = 0;
  ssize_t nread;

  idx_t mynnz = 0;
  while((nread = getline(&line, &len, fin)) != -1) {
    /* ignore blank lines and comments */
    if(nread < 2 || line[0] == '#') {
      continue;
    }

    char * ptr = line;
    int owned = 0;
    for(idx_t m=0; m < nmodes; ++m) {
      idx_t const ind = strtoull(ptr, &ptr, 10) - 1;
      /* mine if the index falls in any of my slices */
      if(ind >= sstarts[m] && ind < sends[m]) {
        owned = 1;
        break;
      }
    }
    mynnz += owned;

    /* consume the value field */
    strtod(ptr, &ptr);
  }
  fclose(fin);
  free(line);

  return mynnz;
}
/**
 * @brief Read my partition of X into tt: keep only nonzeros whose index
 *        falls inside my slice range in at least one mode.
 *
 * @param fname The file containing X.
 * @param tt The tensor structure (must be pre-allocated to hold my nnz).
 * @param sstarts Array of starting slices, inclusive (one for each mode).
 * @param sends Array of ending slices, exclusive (one for each mode).
 */
static void p_read_tt_part_1d(
    char const * const fname,
    sptensor_t * const tt,
    idx_t const * const sstarts,
    idx_t const * const sends)
{
  idx_t const nnz = tt->nnz;
  idx_t const nmodes = tt->nmodes;

  char * ptr = NULL;
  char * line = NULL;
  ssize_t read;
  size_t len = 0;

  FILE * fin = open_f(fname, "r");
  idx_t nnzread = 0;
  while(nnzread < nnz && (read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      int mine = 0;
      ptr = line;
      /* indices/value are written into slot nnzread unconditionally and
       * simply overwritten by the next line when the nonzero isn't mine */
      for(idx_t m=0; m < nmodes; ++m) {
        idx_t ind = strtoull(ptr, &ptr, 10) - 1;
        tt->ind[m][nnzread] = ind;
        if(ind >= sstarts[m] && ind < sends[m]) {
          mine = 1;
        }
      }
      tt->vals[nnzread] = strtod(ptr, &ptr);
      if(mine) {
        ++nnzread;
      }
    }
  }
  fclose(fin);
  free(line);
}
/**
 * @brief Rearrange buffered nonzeros among ranks for a coarse-grained
 *        decomposition: 1D-partition each mode's slices, route nonzeros
 *        to their owners mode by mode, and union the received pieces.
 *        (The previous docstring described a different signature.)
 *
 * @param ttbuf My (arbitrarily distributed) buffered nonzeros.
 * @param ssizes The nonzero counts in each slice of each mode.
 * @param rinfo MPI information (global nnz/dims, 3D comm, etc.); also
 *              receives my mat_start/mat_end range for each mode.
 *
 * @return My portion of the sparse tensor; caller frees.
 */
static sptensor_t * p_rearrange_coarse(
    sptensor_t * const ttbuf,
    idx_t ** const ssizes,
    rank_info * const rinfo)
{
  int const rank = rinfo->rank;
  int const npes = rinfo->npes;
  idx_t const nnz = rinfo->global_nnz;
  idx_t const * const dims = rinfo->global_dims;
  idx_t const nmodes = ttbuf->nmodes;

  idx_t const maxdim = dims[argmax_elem(dims, nmodes)];
  idx_t * weights = splatt_malloc(maxdim * sizeof(*weights));

  idx_t * coarse_parts[MAX_NMODES];

  /* find start/end slices for my partition */
  for(idx_t m=0; m < nmodes; ++m) {
    /* copy ssizes[m] because a prefix sum will be performed */
    memcpy(weights, ssizes[m], dims[m] * sizeof(*weights));

    /* find a balanced partitioning of slices */
    coarse_parts[m] = splatt_malloc((npes+1) * sizeof(**coarse_parts));
    partition_1d(weights, dims[m], coarse_parts[m], npes);

    rinfo->layer_starts[m] = 0;
    rinfo->layer_ends[m] = dims[m];

    /* store partition information */
    rinfo->mat_start[m] = coarse_parts[m][rank];
    rinfo->mat_end[m] = coarse_parts[m][rank+1];
  }
  splatt_free(weights);

  int * parts = splatt_malloc(ttbuf->nnz * sizeof(*parts));

  /* we repeatedly merge into this */
  sptensor_t * ret = tt_alloc(0, nmodes);

  for(idx_t m=0; m < nmodes; ++m) {
    /* determine owners of all my nnz */
    #pragma omp parallel for schedule(static)
    for(idx_t n=0; n < ttbuf->nnz; ++n) {
      idx_t const idx = ttbuf->ind[m][n];
      /* linear scan over the npes+1 partition boundaries */
      for(int p=0; p < npes; ++p) {
        if(idx >= coarse_parts[m][p] && idx < coarse_parts[m][p+1]) {
          parts[n] = p;
          break;
        }
      }
    }

    sptensor_t * tt_mode = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d);

#ifdef SPLATT_DEBUG
    /* sanity check on nnz -- this can be expensive */
    assert(tt_remove_dups(tt_mode) == 0);
    idx_t totnnz;
    MPI_Reduce(&(tt_mode->nnz), &totnnz, 1, SPLATT_MPI_IDX, MPI_SUM, 0,
        rinfo->comm_3d);
    if(rank == 0) {
      assert(totnnz == rinfo->global_nnz);
    }
#endif

    /* save the new unioned tensor and clean up */
    sptensor_t * tt_merged = tt_union(ret, tt_mode);
#ifdef SPLATT_DEBUG
    assert(tt_remove_dups(tt_merged) == 0);
#endif
    tt_free(ret);
    ret = tt_merged;
    tt_free(tt_mode);
  }

  splatt_free(parts);
  for(idx_t m=0; m < nmodes; ++m) {
    splatt_free(coarse_parts[m]);
  }

  return ret;
}
/**
* @brief Find the boundaries for a process layer.
*
* @param ssizes The number of nonzeros found in each index (of each mode).
*               ssizes[1][5] is the number of nonzeros in X(:,5,:).
* @param mode Which mode to work on.
* @param rinfo MPI rank information. Fills in layer_ptrs[mode],
*              layer_starts[mode], and layer_ends[mode].
*/
static void p_find_layer_boundaries(
    idx_t ** const ssizes,
    idx_t const mode,
    rank_info * const rinfo)
{
  idx_t const * const dims = rinfo->global_dims;
  idx_t const nnz = rinfo->global_nnz;
  idx_t const m = mode;

  /* number of layers (processes) along this mode of the 3D grid */
  int const layer_dim = rinfo->dims_3d[m];
  idx_t pnnz = nnz / layer_dim; /* target nnz per layer */

  /* next layer pointer to place */
  int currp = 0;
  /* running nnz count at the previously placed boundary */
  idx_t lastn = 0;
  /* nnz seen so far (starts with slice 0) */
  idx_t nnzcnt = ssizes[m][0];

  /* initialize layer_ptrs */
  rinfo->layer_ptrs[m]
      = splatt_malloc((layer_dim+1) * sizeof(**(rinfo->layer_ptrs)));
  rinfo->layer_ptrs[m][currp++] = 0;
  rinfo->layer_ptrs[m][layer_dim] = dims[m];

  /* single layer: the boundaries [0, dims[m]) are already set.
   * (The unreachable 'return;' that followed this goto was removed.) */
  if(layer_dim == 1) {
    goto CLEANUP;
  }

  /* foreach slice */
  for(idx_t s=1; s < dims[m]; ++s) {
    /* if we have passed the next layer boundary */
    if(nnzcnt >= lastn + pnnz) {
      /* choose this slice or the previous, whichever is closer */
      idx_t const thisdist = nnzcnt - (lastn + pnnz);
      idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]);
      if(prevdist < thisdist) {
        lastn = nnzcnt - ssizes[m][s-1];
      } else {
        lastn = nnzcnt;
      }

      /* Always choosing s but marking lastn with s-1 leads to better balance
       * and communication volume. This is totally a heuristic. */
      rinfo->layer_ptrs[m][currp++] = s;

      /* exit early if we placed the last rank */
      if(currp == layer_dim) {
        break;
      }

      /* adjust target nnz based on what is left */
      pnnz = (nnz - lastn) / SS_MAX(1, layer_dim - (currp-1));
    }
    nnzcnt += ssizes[m][s];
  }

  /* any boundaries we failed to place get empty layers at the end */
  for( ; currp < layer_dim; ++currp) {
    rinfo->layer_ptrs[m][currp] = dims[m];
  }

CLEANUP:
  /* store layer bounderies in layer_{starts, ends} */
  rinfo->layer_starts[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m]];
  rinfo->layer_ends[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m] + 1];

  /* it is possible to have a very small dimension and too many ranks */
  if(rinfo->dims_3d[m] > 1 &&
      rinfo->layer_ends[m] - rinfo->layer_starts[m] == dims[m]) {
    fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\
        SPLATT_PF_IDX".\n", rinfo->rank, m+1);
    rinfo->layer_starts[m] = dims[m];
    rinfo->layer_ends[m] = dims[m];
  }
}
/**
* @brief Rearrange nonzeros according to a medium-grained decomposition.
*
* @param ttbuf The tensor to rearrange.
* @param ssizes The number of nonzeros found in each index.
* @param rinfo MPI rank information.
*
* @return My owned tensor nonzeros.
*/
static sptensor_t * p_rearrange_medium(
    sptensor_t * const ttbuf,
    idx_t * * ssizes,
    rank_info * const rinfo)
{
  /* compute the layer boundaries of every mode -- one thread per mode */
  #pragma omp parallel for schedule(static, 1)
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    p_find_layer_boundaries(ssizes, m, rinfo);
  }

  /* label every local nonzero with the rank that should own it */
  int * owners = splatt_malloc(ttbuf->nnz * sizeof(*owners));
  #pragma omp parallel for schedule(static)
  for(idx_t x=0; x < ttbuf->nnz; ++x) {
    owners[x] = mpi_determine_med_owner(ttbuf, x, rinfo);
  }

  /* ship every nonzero to its owner */
  sptensor_t * rearranged = mpi_rearrange_by_part(ttbuf, owners, rinfo->comm_3d);

  splatt_free(owners);
  return rearranged;
}
/**
* @brief Rearrange nonzeros according to a fine-grained decomposition.
*        (The brief previously said "medium-grained" -- a copy-paste error.)
*
* @param ttbuf The tensor to rearrange.
* @param pfname The filename containing the partitioning information.
* @param ssizes The number of nonzeros found in each index. Unused here;
*               kept so the p_rearrange_* routines share a parallel shape.
* @param rinfo MPI rank information.
*
* @return My owned tensor nonzeros.
*/
static sptensor_t * p_rearrange_fine(
    sptensor_t * const ttbuf,
    char const * const pfname,
    idx_t * * ssizes,
    rank_info * const rinfo)
{
  /* first distribute partitioning information */
  int * parts = p_distribute_parts(ttbuf, pfname, rinfo);

  sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d);

  free(parts);
  return tt;
}
/**
* @brief Count the nonzeros in each slice of X.
*
* @param tt My subtensor.
* @param ssizes A 2D array for counting slice 'sizes'; ssizes[m][i] receives
*               the GLOBAL number of nonzeros with index i in mode m.
* @param rinfo MPI information (containing global dims, nnz, etc.).
*/
static void p_fill_ssizes(
    sptensor_t const * const tt,
    idx_t ** const ssizes,
    rank_info const * const rinfo)
{
  #pragma omp parallel
  {
    for(idx_t m=0; m < tt->nmodes; ++m) {
      idx_t const * const ind = tt->ind[m];

      /* histogram my local nonzeros into ssizes[m]; 'atomic' because several
       * threads may increment the same slice counter */
      #pragma omp for schedule(static)
      for(idx_t n=0; n < tt->nnz; ++n) {
        #pragma omp atomic
        ssizes[m][ind[n]] += 1;
      }
      /* reduce to get total slice counts */
      /* The implicit barrier at the end of the 'omp for' above guarantees all
       * local updates to ssizes[m] finished before this reduction. Only the
       * master thread touches MPI while the other threads may already be
       * histogramming ssizes[m+1] (a different array, so no race).
       * NOTE(review): assumes MPI was initialized with at least
       * MPI_THREAD_FUNNELED -- confirm at the init site. */
      #pragma omp master
      MPI_Allreduce(MPI_IN_PLACE, ssizes[m], (int) rinfo->global_dims[m],
          SPLATT_MPI_IDX, MPI_SUM, rinfo->comm_3d);
    }
  } /* omp parallel */
}
/**
* @brief Fill in the best MPI grid dimensions we can find. The truly optimal
*        solution should involve the tensor's sparsity pattern, but in general
*        this works as well as (and usually better than) hand-tuned
*        dimensions.
*
* @param rinfo MPI rank information; dims_3d[] is overwritten.
*/
static void p_get_best_mpi_dim(
    rank_info * const rinfo)
{
  int nprimes = 0;
  int * primes = get_primes(rinfo->npes, &nprimes);

  /* sum the dimensions and reset the grid to all-ones */
  idx_t dim_sum = 0;
  for(idx_t m=0; m < rinfo->nmodes; ++m) {
    dim_sum += rinfo->global_dims[m];
    rinfo->dims_3d[m] = 1;
  }

  /* the per-process layer size we aim for */
  idx_t const target = dim_sum / (idx_t)rinfo->npes;

  /* hand out prime factors of npes, largest first */
  for(int p = nprimes-1; p >= 0; --p) {
    /* find the mode whose current layer size overshoots the target most */
    idx_t worst = 0;
    idx_t worst_gap = 0;
    for(idx_t m=0; m < rinfo->nmodes; ++m) {
      idx_t const layer = rinfo->global_dims[m] / rinfo->dims_3d[m];
      /* clamp at 0 to avoid underflow when layer <= target */
      idx_t const gap = (layer > target) ? (layer - target) : 0;
      if(m == 0 || gap > worst_gap) {
        worst = m;
        worst_gap = gap;
      }
    }
    /* split that mode by the prime factor */
    rinfo->dims_3d[worst] *= primes[p];
  }

  splatt_free(primes);
}
/**
* @brief Read a sparse tensor in coordinate form from a text file and
*        distribute among MPI ranks. The root parses the file and sends an
*        even chunk of nonzeros to every rank.
*
* @param fin The file to read from (only valid on rank 0).
* @param comm The MPI communicator to distribute among.
*
* @return The sparse tensor (my chunk of nonzeros).
*/
static sptensor_t * p_tt_mpi_read_file(
    FILE * fin,
    MPI_Comm comm)
{
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  idx_t dims[MAX_NMODES];
  idx_t offsets[MAX_NMODES];
  idx_t global_nnz;
  idx_t nmodes;
  sptensor_t * tt = NULL;

  /* root parses the header; everyone learns nmodes/nnz via broadcast.
   * (Previously the identical Bcast pair was duplicated in both branches of
   * an if/else on rank -- collapsed into unconditional collectives.) */
  if(rank == 0) {
    tt_get_dims(fin, &nmodes, &global_nnz, dims, offsets);
    rewind(fin);
  }
  MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm);
  MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm);

  /* compute my even chunk of nonzeros -- root rank gets the extra amount */
  idx_t const target_nnz = global_nnz / npes;
  idx_t my_nnz = target_nnz;
  if(rank == 0) {
    my_nnz = global_nnz - ((npes-1) * my_nnz);
  }

  /* read/send all chunks */
  if(rank == 0) {
    sptensor_t * tt_buf = tt_alloc(target_nnz, nmodes);

    /* now send to everyone else: one chunk of indices per mode (tag = mode)
     * plus one chunk of values (tag = nmodes) */
    for(int p=1; p < npes; ++p) {
      p_fill_tt_nnz(fin, tt_buf, offsets, target_nnz);
      for(idx_t m=0; m < tt_buf->nmodes; ++m) {
        MPI_Send(tt_buf->ind[m], target_nnz, SPLATT_MPI_IDX, p, m, comm);
      }
      MPI_Send(tt_buf->vals, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm);
    }
    tt_free(tt_buf);

    /* load my own (the remainder) last */
    tt = tt_alloc(my_nnz, nmodes);
    p_fill_tt_nnz(fin, tt, offsets, my_nnz);
  } else {
    MPI_Status status;

    /* receive my chunk */
    tt = tt_alloc(my_nnz, nmodes);
    for(idx_t m=0; m < tt->nmodes; ++m) {
      MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status);
    }
    MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status);
  }

  return tt;
}
/**
* @brief Read a sparse tensor in coordinate form from a binary file and
*        distribute among MPI ranks.
*
* @param fin The file to read from (only valid on rank 0).
* @param comm The MPI communicator to distribute among.
*
* @return The sparse tensor, or NULL if the file has more than MAX_NMODES
*         modes (all ranks return NULL consistently).
*/
static sptensor_t * p_tt_mpi_read_binary_file(
    FILE * fin,
    MPI_Comm comm)
{
  sptensor_t * tt = NULL;
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  idx_t global_nnz;
  idx_t nmodes;
  idx_t dims[MAX_NMODES];

  /* get header and tensor stats */
  bin_header header;
  if(rank == 0) {
    read_binary_header(fin, &header);
    fill_binary_idx(&nmodes, 1, &header, fin);
  }
  MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm);

  /* Sanity check BEFORE reading 'dims': it only holds MAX_NMODES entries, so
   * reading nmodes entries first would overflow the stack buffer on the root
   * for a malformed/oversized file. Broadcasting nmodes first also lets every
   * rank bail out consistently. */
  if(nmodes > MAX_NMODES) {
    if(rank == 0) {
      fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. "
                      "Found %"SPLATT_PF_IDX". Please recompile with "
                      "MAX_NMODES=%"SPLATT_PF_IDX".\n",
              MAX_NMODES, nmodes, nmodes);
    }
    return NULL;
  }

  /* now it is safe for the root to read the dimensions and nnz */
  if(rank == 0) {
    fill_binary_idx(dims, nmodes, &header, fin);
    fill_binary_idx(&global_nnz, 1, &header, fin);
  }
  MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm);

  /* compute my even chunk of nonzeros -- root rank gets the extra amount */
  idx_t const target_nnz = global_nnz / npes;
  idx_t my_nnz = target_nnz;
  if(rank == 0) {
    my_nnz = global_nnz - ((npes-1)* target_nnz);
  }

  tt = tt_alloc(my_nnz, nmodes);

  /* read/send all chunks */
  if(rank == 0) {
    /* handle inds: stream each mode's index chunks to the other ranks */
    idx_t * ibuf = splatt_malloc(target_nnz * sizeof(idx_t));
    for(idx_t m=0; m < nmodes; ++m) {
      for(int p=1; p < npes; ++p) {
        fill_binary_idx(ibuf, target_nnz, &header, fin);
        MPI_Send(ibuf, target_nnz, SPLATT_MPI_IDX, p, m, comm);
      }
      /* load my own */
      fill_binary_idx(tt->ind[m], my_nnz, &header, fin);
    }
    splatt_free(ibuf);

    /* now vals */
    val_t * vbuf = splatt_malloc(target_nnz * sizeof(val_t));
    for(int p=1; p < npes; ++p) {
      fill_binary_val(vbuf, target_nnz, &header, fin);
      MPI_Send(vbuf, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm);
    }
    splatt_free(vbuf);

    /* finally, load my own vals */
    fill_binary_val(tt->vals, my_nnz, &header, fin);
  } else {
    /* non-root ranks just recv */
    MPI_Status status;

    /* receive my chunk */
    for(idx_t m=0; m < tt->nmodes; ++m) {
      MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status);
    }
    MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status);
  }

  return tt;
}
/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/
/**
* @brief Read a tensor and distribute it among MPI ranks according to
*        rinfo->decomp (coarse, medium, or fine-grained). Collective over
*        MPI_COMM_WORLD.
*
* @param ifname The tensor file to read.
* @param pfname Partition file; only consulted for fine-grained decomposition.
* @param rinfo MPI rank information; global stats and layer info are filled.
*
* @return My portion of the rearranged tensor, or NULL if ifname cannot be
*         opened.
*/
sptensor_t * mpi_tt_read(
    char const * const ifname,
    char const * const pfname,
    rank_info * const rinfo)
{
  timer_start(&timers[TIMER_IO]);

  /* first just make sure it exists */
  /* NOTE(review): every rank performs this fopen; if the file is visible
   * only on some nodes, ranks disagree on returning NULL and the collectives
   * below would hang -- confirm a shared file system is assumed. */
  FILE * fin;
  if((fin = fopen(ifname, "r")) == NULL) {
    if(rinfo->rank == 0) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", ifname);
    }
    return NULL;
  }
  fclose(fin);

  /* first naively distribute tensor nonzeros for analysis */
  sptensor_t * ttbuf = mpi_simple_distribute(ifname, MPI_COMM_WORLD);

  rinfo->nmodes = ttbuf->nmodes;
  /* global nnz = sum of local nnz; global dim = max of local dims */
  MPI_Allreduce(&(ttbuf->nnz), &(rinfo->global_nnz), 1, SPLATT_MPI_IDX,
      MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(ttbuf->dims, &(rinfo->global_dims), ttbuf->nmodes,
      SPLATT_MPI_IDX, MPI_MAX, MPI_COMM_WORLD);

  /* first compute MPI dimension if not specified by the user */
  if(rinfo->decomp == DEFAULT_MPI_DISTRIBUTION) {
    rinfo->decomp = SPLATT_DECOMP_MEDIUM;
    p_get_best_mpi_dim(rinfo);
  }

  mpi_setup_comms(rinfo);

  /* count # nonzeros found in each index */
  idx_t * ssizes[MAX_NMODES];
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    ssizes[m] = (idx_t *) calloc(rinfo->global_dims[m], sizeof(idx_t));
  }
  p_fill_ssizes(ttbuf, ssizes, rinfo);

  /* actually parse tensor */
  sptensor_t * tt = NULL;
  switch(rinfo->decomp) {
  case SPLATT_DECOMP_COARSE:
    tt = p_rearrange_coarse(ttbuf, ssizes, rinfo);
    tt_fill_dims(tt);
    break;

  case SPLATT_DECOMP_MEDIUM:
    tt = p_rearrange_medium(ttbuf, ssizes, rinfo);

    /* now map tensor indices to local (layer) coordinates and fill in dims */
    /* 'master' sets the layer-local dimension once while all threads shift
     * the indices of that mode into [0, layer size) */
    #pragma omp parallel
    for(idx_t m=0; m < ttbuf->nmodes; ++m) {
      #pragma omp master
      tt->dims[m] = rinfo->layer_ends[m] - rinfo->layer_starts[m];

      #pragma omp for schedule(static) nowait
      for(idx_t n=0; n < tt->nnz; ++n) {
        assert(tt->ind[m][n] >= rinfo->layer_starts[m]);
        assert(tt->ind[m][n] < rinfo->layer_ends[m]);
        tt->ind[m][n] -= rinfo->layer_starts[m];
      }
    }
    break;

  case SPLATT_DECOMP_FINE:
    tt = p_rearrange_fine(ttbuf, pfname, ssizes, rinfo);

    /* now fix tt->dims -- fine-grained keeps global coordinates */
    for(idx_t m=0; m < tt->nmodes; ++m) {
      tt->dims[m] = rinfo->global_dims[m];
      rinfo->layer_ends[m] = tt->dims[m];
    }
    break;
  }

  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    free(ssizes[m]);
  }
  tt_free(ttbuf);

  timer_stop(&timers[TIMER_IO]);
  return tt;
}
/**
* @brief Extract the nonzeros whose 'mode' index lies in [start, end) into a
*        new tensor whose mode-coordinates are shifted to [0, end-start).
*
* @param tt The tensor to filter (unchanged).
* @param mode The mode to filter on.
* @param start Inclusive lower slice bound (in uncompressed coordinates).
* @param end Exclusive upper slice bound (in uncompressed coordinates).
*
* @return A newly allocated filtered tensor; caller frees with tt_free().
*/
sptensor_t * mpi_filter_tt_1d(
    sptensor_t const * const tt,
    idx_t const mode,
    idx_t start,
    idx_t end)
{
  sptensor_t * ftt = tt_alloc(tt->nnz, tt->nmodes);
  for(idx_t m=0; m < ftt->nmodes; ++m) {
    ftt->dims[m] = tt->dims[m];
  }

  /* Adjust start and end if tt has been compressed. */
  assert(start < end);
  /* TODO: change this linear search into a binary one */
  /* Map the uncompressed [start, end) bounds to compressed coordinates by
   * scanning indmap. NOTE(review): assumes both bounds actually occur in
   * indmap; otherwise they are left untouched -- confirm callers guarantee
   * this. */
  if(tt->indmap[mode] != NULL) {
    for(idx_t i=0; i < tt->dims[mode]; ++i) {
      if(tt->indmap[mode][i] == start) {
        start = i;
      }
      if(tt->indmap[mode][i]+1 == end) {
        end = i+1;
        break;
      }
    }
  }

  idx_t nnz = 0;
  for(idx_t n=0; n < tt->nnz; ++n) {
    /* Copy the nonzero if we own the slice. */
    if(tt->ind[mode][n] >= start && tt->ind[mode][n] < end) {
      for(idx_t m=0; m < tt->nmodes; ++m) {
        ftt->ind[m][nnz] = tt->ind[m][n];
      }
      ftt->vals[nnz++] = tt->vals[n];
    }
  }

  /* update ftt dimensions and nnz */
  ftt->nnz = nnz;
  ftt->dims[mode] = end - start;

  /* now map mode coords to [0, end-start) */
  #pragma omp parallel for schedule(static)
  for(idx_t n=0; n < ftt->nnz; ++n) {
    assert(ftt->ind[mode][n] >= start);
    assert(ftt->ind[mode][n] < end);
    ftt->ind[mode][n] -= start;
  }

  /* create new indmap for mode; if one mode has no indmap, none after it do */
  for(idx_t m=0; m < tt->nmodes; ++m) {
    if(tt->indmap[m] == NULL) {
      break;
    }

    /* NOTE(review): presumably ftt->indmap[m] is NULL from tt_alloc, making
     * this realloc act as malloc -- verify. */
    ftt->indmap[m] = (idx_t *) realloc(ftt->indmap[m],
        ftt->dims[m] * sizeof(idx_t));

    /* mode indices are shifted. otherwise just copy */
    if(m == mode) {
      #pragma omp parallel for
      for(idx_t i=0; i < ftt->dims[mode]; ++i) {
        ftt->indmap[mode][i] = tt->indmap[mode][i+start];
      }
    } else {
      par_memcpy(ftt->indmap[m], tt->indmap[m], tt->dims[m] * sizeof(idx_t));
    }
  }

  /* sanity check */
  for(idx_t i=0; i < ftt->dims[mode]; ++i) {
    assert(i + start < end);
  }
  for(idx_t n=0; n < ftt->nnz; ++n) {
    assert(ftt->ind[mode][n] < end - start);
  }

  return ftt;
}
/**
* @brief Gather the distributed factor matrices onto the root rank and write
*        each to '<basename><mode+1>.mat'. Collective over rinfo->comm_3d.
*
* @param mats The local factor matrices, one per mode.
* @param perm Permutation whose iperms[] maps local rows back to (layer-
*             relative) original indices.
* @param rinfo MPI rank information.
* @param basename Output filename prefix.
* @param nmodes Number of modes/matrices to write.
*/
void mpi_write_mats(
    matrix_t ** mats,
    permutation_t const * const perm,
    rank_info const * const rinfo,
    char const * const basename,
    idx_t const nmodes)
{
  char * fname;
  idx_t const nfactors = mats[0]->J;

  MPI_Status status;

  idx_t maxdim = 0;
  idx_t maxlocaldim = 0;
  matrix_t * matbuf = NULL;
  val_t * vbuf = NULL;
  idx_t * loc_iperm = NULL;

  for(idx_t m=0; m < nmodes; ++m) {
    maxdim = SS_MAX(maxdim, rinfo->global_dims[m]);
    maxlocaldim = SS_MAX(maxlocaldim, mats[m]->I);
  }

  /* get the largest local dim */
  if(rinfo->rank == 0) {
    MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
        rinfo->comm_3d);
  } else {
    MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
        rinfo->comm_3d);
  }

  /* root allocates the receive buffers once, sized to the largest mode */
  if(rinfo->rank == 0) {
    matbuf = mat_alloc(maxdim, nfactors);
    loc_iperm = (idx_t *) splatt_malloc(maxdim * sizeof(idx_t));
    vbuf = (val_t *) splatt_malloc(maxdim * nfactors * sizeof(val_t));
  }

  for(idx_t m=0; m < nmodes; ++m) {
    /* root handles the writing */
    if(rinfo->rank == 0) {
      /* NOTE(review): asprintf's return value is unchecked; on allocation
       * failure 'fname' is indeterminate. */
      asprintf(&fname, "%s%"SPLATT_PF_IDX".mat", basename, m+1);
      matbuf->I = rinfo->global_dims[m];

      /* copy root's matrix to buffer, un-permuting rows to global order */
      for(idx_t i=0; i < mats[m]->I; ++i) {
        idx_t const gi = rinfo->layer_starts[m] + perm->iperms[m][i];
        for(idx_t f=0; f < nfactors; ++f) {
          matbuf->vals[f + (gi*nfactors)] = mats[m]->vals[f+(i*nfactors)];
        }
      }

      /* receive matrix from each rank -- all four messages use tag 0 and
       * rely on MPI's per-(source,tag) ordering guarantee */
      for(int p=1; p < rinfo->npes; ++p) {
        idx_t layerstart;
        idx_t nrows;
        MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);
        MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);
        MPI_Recv(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 0, rinfo->comm_3d,
            &status);
        MPI_Recv(loc_iperm, nrows, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);

        /* permute buffer and copy into matbuf */
        for(idx_t i=0; i < nrows; ++i) {
          idx_t const gi = layerstart + loc_iperm[i];
          for(idx_t f=0; f < nfactors; ++f) {
            matbuf->vals[f + (gi*nfactors)] = vbuf[f+(i*nfactors)];
          }
        }
      }

      /* write the factor matrix to disk */
      mat_write(matbuf, fname);

      /* clean up */
      free(fname);
    } else {
      /* send matrix to root */
      MPI_Send(&(rinfo->layer_starts[m]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
      MPI_Send(&(mats[m]->I), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
      MPI_Send(mats[m]->vals, mats[m]->I * mats[m]->J, SPLATT_MPI_VAL, 0, 0,
          rinfo->comm_3d);
      MPI_Send(perm->iperms[m] + rinfo->mat_start[m], mats[m]->I, SPLATT_MPI_IDX,
          0, 0, rinfo->comm_3d);
    }
  } /* foreach mode */

  if(rinfo->rank == 0) {
    mat_free(matbuf);
    free(vbuf);
    free(loc_iperm);
  }
}
/**
* @brief Dump this rank's nonzeros to '<rank>.part', one line per nonzero:
*        the 1-indexed original global coordinates followed by the value.
*
* @param tt My local tensor.
* @param perm Permutation; iperms[] (when present) maps back to original ids.
* @param rinfo MPI rank information (used for the file name).
*/
void mpi_write_part(
    sptensor_t const * const tt,
    permutation_t const * const perm,
    rank_info const * const rinfo)
{
  /* file name is <rank>.part */
  char fname[256];
  sprintf(fname, "%d.part", rinfo->rank);
  FILE * fp = open_f(fname, "w");

  for(idx_t x=0; x < tt->nnz; ++x) {
    for(idx_t mode=0; mode < tt->nmodes; ++mode) {
      /* undo index compression, then undo the permutation */
      idx_t ind = tt->ind[mode][x];
      if(tt->indmap[mode] != NULL) {
        ind = tt->indmap[mode][ind];
      }
      if(perm->iperms[mode] != NULL) {
        ind = perm->iperms[mode][ind];
      }
      /* write index */
      fprintf(fp, "%"SPLATT_PF_IDX" ", 1+ind);
    }
    fprintf(fp, "%"SPLATT_PF_VAL"\n", tt->vals[x]);
  }

  fclose(fp);
}
/**
* @brief Naively distribute a tensor file's nonzeros among the ranks of
*        'comm'. Collective over comm.
*
* @param ifname The tensor file to read (opened on the root rank only).
* @param comm The communicator to distribute among.
*
* @return My chunk of nonzeros, or NULL on an unknown file type or a reader
*         failure.
*/
sptensor_t * mpi_simple_distribute(
    char const * const ifname,
    MPI_Comm comm)
{
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  sptensor_t * tt = NULL;

  FILE * fin = NULL;
  if(rank == 0) {
    fin = open_f(ifname, "r");
  }

  switch(get_file_type(ifname)) {
  case SPLATT_FILE_TEXT_COORD:
    tt = p_tt_mpi_read_file(fin, comm);
    break;
  case SPLATT_FILE_BIN_COORD:
    tt = p_tt_mpi_read_binary_file(fin, comm);
    break;
  default:
    /* unknown type: leave tt NULL instead of crashing below */
    if(rank == 0) {
      fprintf(stderr, "SPLATT ERROR: unknown file type for '%s'.\n", ifname);
    }
    break;
  }

  if(rank == 0) {
    fclose(fin);
  }

  /* The readers return NULL on failure (e.g., too many modes); guard the
   * dereference that previously crashed on tt == NULL. */
  if(tt != NULL) {
    tt_fill_dims(tt);
  }

  return tt;
}
/**
* @brief Generate a random factor matrix for 'mode' on the root and scatter
*        the appropriate (permuted) rows to every rank so all ranks agree on
*        the initialization. Collective over rinfo->comm_3d.
*
* @param mode The mode to generate a factor for.
* @param nfactors The matrix rank (number of columns).
* @param perm Permutation; iperms[mode] maps local rows to original indices.
* @param rinfo MPI rank information.
*
* @return My rows of the random factor; caller frees with mat_free().
*/
matrix_t * mpi_mat_rand(
    idx_t const mode,
    idx_t const nfactors,
    permutation_t const * const perm,
    rank_info * const rinfo)
{
  idx_t const localdim = rinfo->mat_end[mode] - rinfo->mat_start[mode];
  matrix_t * mymat = mat_alloc(localdim, nfactors);

  MPI_Status status;

  /* figure out buffer sizes */
  idx_t maxlocaldim = localdim;
  if(rinfo->rank == 0) {
    MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
        rinfo->comm_3d);
  } else {
    MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
        rinfo->comm_3d);
  }

  /* root rank does the heavy lifting */
  if(rinfo->rank == 0) {
    /* allocate buffers */
    idx_t * loc_perm = splatt_malloc(maxlocaldim * sizeof(*loc_perm));
    val_t * vbuf = splatt_malloc(maxlocaldim * nfactors * sizeof(*vbuf));

    /* allocate initial factor */
    matrix_t * full_factor = mat_rand(rinfo->global_dims[mode], nfactors);

    /* copy root's own matrix to output */
    /* NOTE(review): root offsets its own rows with mat_start[], but non-root
     * ranks send layer_starts[] below -- confirm these coincide for the
     * matrix distribution in use. */
    #pragma omp parallel for schedule(static)
    for(idx_t i=0; i < localdim; ++i) {
      idx_t const gi = rinfo->mat_start[mode] + perm->iperms[mode][i];
      for(idx_t f=0; f < nfactors; ++f) {
        mymat->vals[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)];
      }
    }

    /* communicate! */
    for(int p=1; p < rinfo->npes; ++p) {
      /* first receive layer start and permutation info (tags 0-2) */
      idx_t layerstart;
      idx_t nrows;
      MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);
      MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 1, rinfo->comm_3d, &status);
      MPI_Recv(loc_perm, nrows, SPLATT_MPI_IDX, p, 2, rinfo->comm_3d, &status);

      /* fill buffer with rank p's rows, already in p's local row order */
      #pragma omp parallel for schedule(static)
      for(idx_t i=0; i < nrows; ++i) {
        idx_t const gi = layerstart + loc_perm[i];
        for(idx_t f=0; f < nfactors; ++f) {
          vbuf[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)];
        }
      }

      /* send to rank p */
      MPI_Send(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 3, rinfo->comm_3d);
    }

    mat_free(full_factor);
    splatt_free(loc_perm);
    splatt_free(vbuf);

  /* other ranks just send/recv */
  } else {
    /* send permutation info to root */
    MPI_Send(&(rinfo->layer_starts[mode]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
    MPI_Send(&localdim, 1, SPLATT_MPI_IDX, 0, 1, rinfo->comm_3d);
    MPI_Send(perm->iperms[mode] + rinfo->mat_start[mode], localdim,
        SPLATT_MPI_IDX, 0, 2, rinfo->comm_3d);

    /* receive factor */
    MPI_Recv(mymat->vals, mymat->I * mymat->J, SPLATT_MPI_VAL, 0, 3,
        rinfo->comm_3d, &status);
  }

  return mymat;
}
/**
* @brief Exchange nonzeros so each rank receives those assigned to it by
*        'parts'. Collective over comm.
*
* @param ttbuf My current nonzeros.
* @param parts parts[n] is the rank that should own nonzero n.
* @param comm The communicator to exchange within.
*
* @return A new tensor holding my assigned nonzeros; caller frees it.
*/
sptensor_t * mpi_rearrange_by_part(
    sptensor_t const * const ttbuf,
    int const * const parts,
    MPI_Comm comm)
{
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  /* count how many to send to each process */
  int * nsend = calloc(npes, sizeof(*nsend));
  int * nrecv = calloc(npes, sizeof(*nrecv));
  for(idx_t n=0; n < ttbuf->nnz; ++n) {
    nsend[parts[n]] += 1;
  }
  MPI_Alltoall(nsend, 1, MPI_INT, nrecv, 1, MPI_INT, comm);

  idx_t send_total = 0;
  idx_t recv_total = 0;
  for(int p=0; p < npes; ++p) {
    send_total += nsend[p];
    recv_total += nrecv[p];
  }
  /* BUGFIX: this previously used '=' (assignment) instead of '==', making
   * the assertion vacuous and clobbering send_total. */
  assert(send_total == ttbuf->nnz);

  /* how many nonzeros I'll own */
  idx_t const nowned = recv_total;

  int * send_disp = splatt_malloc((npes+1) * sizeof(*send_disp));
  int * recv_disp = splatt_malloc((npes+1) * sizeof(*recv_disp));

  /* recv_disp is const so we'll just fill it out once */
  recv_disp[0] = 0;
  for(int p=1; p <= npes; ++p) {
    recv_disp[p] = recv_disp[p-1] + nrecv[p-1];
  }

  /* allocate my tensor and send buffer */
  sptensor_t * tt = tt_alloc(nowned, ttbuf->nmodes);
  idx_t * isend_buf = splatt_malloc(ttbuf->nnz * sizeof(*isend_buf));

  /* rearrange into sendbuf and send one mode at a time */
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    /* prefix sum to make disps; send_disp[p+1] doubles as the running
     * insertion cursor for rank p's chunk during the scatter below */
    send_disp[0] = send_disp[1] = 0;
    for(int p=2; p <= npes; ++p) {
      send_disp[p] = send_disp[p-1] + nsend[p-2];
    }

    idx_t const * const ind = ttbuf->ind[m];
    for(idx_t n=0; n < ttbuf->nnz; ++n) {
      idx_t const index = send_disp[parts[n]+1]++;
      isend_buf[index] = ind[n];
    }

    /* exchange indices */
    MPI_Alltoallv(isend_buf, nsend, send_disp, SPLATT_MPI_IDX,
                  tt->ind[m], nrecv, recv_disp, SPLATT_MPI_IDX,
                  comm);
  }
  splatt_free(isend_buf);

  /* lastly, rearrange vals */
  val_t * vsend_buf = splatt_malloc(ttbuf->nnz * sizeof(*vsend_buf));
  send_disp[0] = send_disp[1] = 0;
  for(int p=2; p <= npes; ++p) {
    send_disp[p] = send_disp[p-1] + nsend[p-2];
  }

  val_t const * const vals = ttbuf->vals;
  for(idx_t n=0; n < ttbuf->nnz; ++n) {
    idx_t const index = send_disp[parts[n]+1]++;
    vsend_buf[index] = vals[n];
  }

  /* exchange vals */
  MPI_Alltoallv(vsend_buf, nsend, send_disp, SPLATT_MPI_VAL,
                tt->vals, nrecv, recv_disp, SPLATT_MPI_VAL,
                comm);
  splatt_free(vsend_buf);

  splatt_free(send_disp);
  splatt_free(recv_disp);
  /* allocated with calloc */
  free(nsend);
  free(nrecv);

  /* fill dim info */
  tt_fill_dims(tt);
  return tt;
}
/**
* @brief Determine which rank owns nonzero n under the medium-grained
*        decomposition by locating its layer in every mode.
*
* @param ttbuf The tensor holding the nonzero.
* @param n The index of the nonzero.
* @param rinfo MPI rank information (layer_ptrs, dims_3d, comm_3d).
*
* @return The MPI rank of the owner.
*/
int mpi_determine_med_owner(
    sptensor_t * const ttbuf,
    idx_t const n,
    rank_info * const rinfo)
{
  int coords[MAX_NMODES];

  assert(rinfo->decomp == SPLATT_DECOMP_MEDIUM);

  /* determine the coordinates of the owner rank */
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    idx_t const id = ttbuf->ind[m][n];
    idx_t const * const ptrs = rinfo->layer_ptrs[m];

    /* Binary search (replacing the TODO'd linear scan) for the smallest l in
     * [1, dims_3d[m]] with id < ptrs[l]; the owning layer is then l-1.
     * layer_ptrs[m] is non-decreasing and ptrs[dims_3d[m]] is the global
     * dimension, so such an l always exists for a valid index. */
    int lo = 1;
    int hi = rinfo->dims_3d[m];
    while(lo < hi) {
      int const mid = lo + ((hi - lo) / 2);
      if(id < ptrs[mid]) {
        hi = mid;
      } else {
        lo = mid + 1;
      }
    }
    coords[m] = lo - 1;
  }

  /* translate that to an MPI rank */
  int owner;
  MPI_Cart_rank(rinfo->comm_3d, coords, &owner);
  return owner;
}
|
_CPULingo.c | /*
C implementation of SIML LINGOs on CPUs
#=============================================================================================
# COPYRIGHT NOTICE
#
# Written by Imran S. Haque (ihaque@cs.stanford.edu)
#
# Copyright (c) 2009-2010 Stanford University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Stanford University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#=============================================================================================
*/
#include "Python.h"
#include "numpy/arrayobject.h"
#include <stdint.h>
#include <stdio.h>
#ifdef USE_OPENMP
#include <omp.h>
#endif
/* Return the larger of two 32-bit integers.
 * 'static' added: a plain C99 'inline' at file scope emits no out-of-line
 * definition, so any call the compiler chooses not to inline (e.g. at -O0)
 * fails to link. Internal linkage fixes that for this translation unit. */
static inline int32_t max(int32_t a, int32_t b) {
    return a > b ? a : b;
}
/* Return the smaller of two 32-bit integers.
 * 'static' added for the same C99 inline-linkage reason as max() above. */
static inline int32_t min(int32_t a, int32_t b) {
    return a < b ? a : b;
}
// Multiset Tanimoto similarity between two Lingo sets, with magnitudes
// supplied by the caller. 'a' and 'b' are sorted Lingo ids; 'asize'/'bsize'
// hold the multiplicity of each id. Returns 0 when either magnitude is 0.
static float multisetTanimoto3_mags(int32_t* a,int32_t* b,int32_t* asize,int32_t* bsize,int32_t alen,int32_t blen,int32_t amag,int32_t bmag) {
    if (amag == 0 || bmag == 0) {
        return 0.0f;
    }
    // Merge-style walk over the two sorted id lists, accumulating the
    // multiset intersection size.
    int32_t isct = 0;
    int32_t i = 0;
    int32_t j = 0;
    while (i < alen && j < blen) {
        int32_t const av = a[i];
        int32_t const bv = b[j];
        if (av < bv) {
            i++;
        } else if (bv < av) {
            j++;
        } else {
            // shared Lingo: the intersection gains the smaller multiplicity
            isct += (asize[i] < bsize[j]) ? asize[i] : bsize[j];
            i++;
            j++;
        }
    }
    // Tanimoto: |A ∩ B| / (|A| + |B| - |A ∩ B|)
    return isct/((float)amag+bmag-isct);
}
/* Argument-validation helpers for the NumPy-array entry points below. Both
 * expand to an if-statement that sets a ValueError and returns NULL from the
 * ENCLOSING function on failure, so they may only be used inside functions
 * returning PyObject*. */

/* Fail unless 'ary' holds int32 elements. */
#define CHECKARRAYTYPE(ary,name) if (PyArray_TYPE(ary) != NPY_INT32) {\
PyErr_SetString(PyExc_ValueError,name" was not of type int32");\
return NULL;\
}

/* Fail unless 'ary' is a contiguous, aligned, C-ordered array. */
#define CHECKARRAYCARRAY(ary,name) if ((PyArray_FLAGS(ary) & NPY_CARRAY) != NPY_CARRAY) {\
PyErr_SetString(PyExc_ValueError,name" was not a contiguous well-behaved array in C order");\
return NULL;\
}
/* Python entry point: compute the full nrefmols x nqmols matrix of multiset
 * Lingo Tanimotos between a reference set and a query set.
 *
 * Args (all numpy int32 arrays): reflingos, refcounts (2-D, same shape),
 * refmags, reflengths (1-D or Nx1), then the query equivalents, plus an
 * optional nprocs int (ignored here -- this is the serial build).
 * Returns a new 2-D float32 array of Tanimotos, or NULL with a ValueError
 * set on malformed input. */
static PyObject *_CPULingo_getTanimotoBlock(PyObject *self, PyObject *args) {
    npy_intp dim2[2];
    int32_t *reflingos,*refcounts,*refmags,*reflengths;
    int32_t *qlingos,*qcounts,*qmags,*qlengths;
    npy_intp *rldims,*rcdims,*rlstrides,*rcstrides,*qldims,*qcdims,*qlstrides,*qcstrides,*tan_strides;
    int nrefmols,nqmols;
    PyArrayObject* ary_tanimotos;
    float* tanimotos;
    int32_t *reflingoset,*refcountset;
    int32_t refmag,reflength;
    float* outputrow;
    int32_t *qlingoset,*qcountset;
    int32_t qmag,qlength;
    int row,col;
    float t;
    int nprocs=1;
    PyArrayObject *ary_reflingos,*ary_refcounts,*ary_refmags,*ary_reflengths,*ary_qlingos,*ary_qcounts,*ary_qmags,*ary_qlengths;
    if (!PyArg_ParseTuple(args, "OOOOOOOO|i",
                          &ary_reflingos, &ary_refcounts, &ary_refmags, &ary_reflengths,
                          &ary_qlingos, &ary_qcounts, &ary_qmags, &ary_qlengths,
                          &nprocs)) {
        return NULL;
    }
    // This is a serial function. We only accept the argument so that we can
    // support the interface for gTBParallel if the user did not enable OpenMP
    if (nprocs != 1) {
        nprocs = 1;
    }

    // Get pointers to array data
    reflingos  = (int32_t*) PyArray_DATA(ary_reflingos);
    refcounts  = (int32_t*) PyArray_DATA(ary_refcounts);
    refmags    = (int32_t*) PyArray_DATA(ary_refmags);
    reflengths = (int32_t*) PyArray_DATA(ary_reflengths);
    qlingos    = (int32_t*) PyArray_DATA(ary_qlingos);
    qcounts    = (int32_t*) PyArray_DATA(ary_qcounts);
    qmags      = (int32_t*) PyArray_DATA(ary_qmags);
    qlengths   = (int32_t*) PyArray_DATA(ary_qlengths);

    // Get dimensions of arrays (# molecules, maxlingos)
    rldims    = PyArray_DIMS(ary_reflingos);
    rlstrides = PyArray_STRIDES(ary_reflingos);
    rcdims    = PyArray_DIMS(ary_refcounts);
    rcstrides = PyArray_STRIDES(ary_refcounts);
    qldims    = PyArray_DIMS(ary_qlingos);
    qlstrides = PyArray_STRIDES(ary_qlingos);
    qcdims    = PyArray_DIMS(ary_qcounts);
    qcstrides = PyArray_STRIDES(ary_qcounts);

    // Do some sanity checking on array dimensions {{{
    // - make sure they are of int32 data type
    CHECKARRAYTYPE(ary_reflingos,"Reference Lingo matrix");
    CHECKARRAYTYPE(ary_refcounts,"Reference Lingo count matrix");
    CHECKARRAYTYPE(ary_refmags,"Reference magnitude vector");
    CHECKARRAYTYPE(ary_reflengths,"Reference length vector");
    CHECKARRAYTYPE(ary_qlingos,"Query Lingo matrix");
    CHECKARRAYTYPE(ary_qcounts,"Query Lingo count matrix");
    CHECKARRAYTYPE(ary_qmags,"Query magnitude vector");
    CHECKARRAYTYPE(ary_qlengths,"Query length vector");

    // - make sure lingo/count/mag arrays are 2d and are the same size in a set (ref/q)
    if (ary_reflingos->nd != 2) {
        PyErr_SetString(PyExc_ValueError,"Reference Lingo matrix did not have dimension 2");
        return NULL;
    }
    if (ary_refcounts->nd != 2) {
        PyErr_SetString(PyExc_ValueError,"Reference Lingo count matrix did not have dimension 2");
        return NULL;
    }
    if (rldims[0] != rcdims[0] || rldims[1] != rcdims[1]) {
        PyErr_SetString(PyExc_ValueError,"Reference Lingo and Lingo count matrix did not have identical shapes");
        return NULL;
    }
    if (ary_qlingos->nd != 2) {
        PyErr_SetString(PyExc_ValueError,"Query Lingo matrix did not have dimension 2");
        return NULL;
    }
    if (ary_qcounts->nd != 2) {
        PyErr_SetString(PyExc_ValueError,"Query Lingo count matrix did not have dimension 2");
        return NULL;
    }
    if (qldims[0] != qcdims[0] || qldims[1] != qcdims[1]) {
        PyErr_SetString(PyExc_ValueError,"Query Lingo and Lingo count matrix did not have identical shapes");
        return NULL;
    }

    // - make sure stride is 4 in last dimension (ie, is C-style and contiguous)
    CHECKARRAYCARRAY(ary_reflingos,"Reference Lingo matrix");
    CHECKARRAYCARRAY(ary_refcounts,"Reference Lingo count matrix");
    CHECKARRAYCARRAY(ary_refmags,"Reference magnitude vector");
    CHECKARRAYCARRAY(ary_reflengths,"Reference length vector");
    CHECKARRAYCARRAY(ary_qlingos,"Query Lingo matrix");
    CHECKARRAYCARRAY(ary_qcounts,"Query Lingo count matrix");
    CHECKARRAYCARRAY(ary_qmags,"Query magnitude vector");
    CHECKARRAYCARRAY(ary_qlengths,"Query length vector");

    // - make sure lengths/mags are 1d or (Nx1) and have same length as #rows of l/c arrays
    if (!(ary_reflengths->nd == 1 || (ary_reflengths->nd == 2 && ary_reflengths->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_ValueError,"Reference length vector was not 1-D");
        return NULL;
    }
    if (ary_reflengths->dimensions[0] != rldims[0]) {
        PyErr_SetString(PyExc_ValueError,"Reference length vector length did not equal number of rows of reference Lingo matrix");
        return NULL;
    }
    if (!(ary_refmags->nd == 1 || (ary_refmags->nd == 2 && ary_refmags->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_ValueError,"Reference magnitude vector was not 1-D");
        return NULL;
    }
    if (ary_refmags->dimensions[0] != rldims[0]) {
        PyErr_SetString(PyExc_ValueError,"Reference magnitude vector length did not equal number of rows of reference Lingo matrix");
        return NULL;
    }
    if (!(ary_qlengths->nd == 1 || (ary_qlengths->nd == 2 && ary_qlengths->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_ValueError,"Query length vector was not 1-D");
        return NULL;
    }
    if (ary_qlengths->dimensions[0] != qldims[0]) {
        PyErr_SetString(PyExc_ValueError,"Query length vector length did not equal number of rows of query Lingo matrix");
        return NULL;
    }
    if (!(ary_qmags->nd == 1 || (ary_qmags->nd == 2 && ary_qmags->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_ValueError,"Query magnitude vector was not 1-D");
        return NULL;
    }
    if (ary_qmags->dimensions[0] != qldims[0]) {
        PyErr_SetString(PyExc_ValueError,"Query magnitude vector length did not equal number of rows of query Lingo matrix");
        return NULL;
    }
    // }}}

    nrefmols = rldims[0];
    nqmols = qldims[0];

    // Create return array containing Tanimotos
    // NOTE(review): PyArray_SimpleNew's result is not NULL-checked before use.
    dim2[0] = nrefmols;
    dim2[1] = nqmols;
    ary_tanimotos = (PyArrayObject*) PyArray_SimpleNew(2,dim2,NPY_FLOAT);
    tanimotos = (float*) PyArray_DATA(ary_tanimotos);
    tan_strides = PyArray_STRIDES(ary_tanimotos);

    // Fill this array with Tanimotos, one element at a time...
    // Row strides are in bytes; /4 converts to element offsets, relying on
    // the 4-byte int32/float32 element sizes verified above.
    for (row = 0; row < nrefmols; row++) {
        reflingoset = reflingos + row*rlstrides[0]/4;
        refcountset = refcounts + row*rcstrides[0]/4;
        refmag = refmags[row];
        reflength = reflengths[row];
        outputrow = tanimotos + row*tan_strides[0]/4;
        for (col = 0; col < nqmols; col++) {
            qlingoset = qlingos + col*qlstrides[0]/4;
            qcountset = qcounts + col*qcstrides[0]/4;
            qmag = qmags[col];
            qlength = qlengths[col];
            t = multisetTanimoto3_mags(reflingoset,qlingoset,refcountset,qcountset,reflength,qlength,refmag,qmag);
            outputrow[col] = t;
        }
    }
    return PyArray_Return(ary_tanimotos);
}
#ifdef USE_OPENMP
/*
 * getTanimotoBlockParallel(reflingos, refcounts, refmags, reflengths,
 *                          qlingos, qcounts, qmags, qlengths [, nprocs])
 *
 * OpenMP-parallel variant of getTanimotoBlock.  Computes the full
 * nrefmols x nqmols block of multiset LINGO Tanimoto similarities via
 * multisetTanimoto3_mags(), parallelized over reference rows.
 *
 * Arguments (all numpy arrays of int32):
 *   reflingos/refcounts:  2-D (nrefmols x maxlingos) Lingo id/count matrices
 *   refmags/reflengths:   1-D (or Nx1) per-row magnitude/set-length vectors
 *   qlingos/qcounts/qmags/qlengths: same layout for the query set
 *   nprocs (optional int): if > 0, passed to omp_set_num_threads()
 *
 * Returns a new 2-D float32 array of shape (nrefmols, nqmols), or NULL
 * with a Python exception set on validation/allocation failure.
 */
static PyObject *_CPULingo_getTanimotoBlockParallel(PyObject *self, PyObject *args) {
    npy_intp dim2[2];
    int32_t *reflingos,*refcounts,*refmags,*reflengths;
    int32_t *qlingos,*qcounts,*qmags,*qlengths;
    npy_intp *rldims,*rcdims,*rlstrides,*rcstrides,*qldims,*qcdims,*qlstrides,*qcstrides,*tan_strides;
    int nrefmols,nqmols;
    PyArrayObject* ary_tanimotos;
    float* tanimotos;
    int32_t *reflingoset,*refcountset;
    int32_t refmag,reflength;
    float* outputrow;
    int32_t *qlingoset,*qcountset;
    int32_t qmag,qlength;
    int row,col;
    float t;
    int nprocs=0;
    PyArrayObject *ary_reflingos,*ary_refcounts,*ary_refmags,*ary_reflengths,*ary_qlingos,*ary_qcounts,*ary_qmags,*ary_qlengths;
    if (!PyArg_ParseTuple(args, "OOOOOOOO|i",
                          &ary_reflingos, &ary_refcounts, &ary_refmags, &ary_reflengths,
                          &ary_qlingos, &ary_qcounts, &ary_qmags, &ary_qlengths,
                          &nprocs)) {
        return NULL;
    }
    /* Get pointers to array data.
       NOTE(review): data/dims are fetched before the CHECKARRAYTYPE sanity
       checks below; a non-ndarray argument would misbehave before being
       rejected.  Presumably callers always pass ndarrays — confirm, or add
       PyArray_Check up front. */
    reflingos = (int32_t*) PyArray_DATA(ary_reflingos);
    refcounts = (int32_t*) PyArray_DATA(ary_refcounts);
    refmags = (int32_t*) PyArray_DATA(ary_refmags);
    reflengths = (int32_t*) PyArray_DATA(ary_reflengths);
    qlingos = (int32_t*) PyArray_DATA(ary_qlingos);
    qcounts = (int32_t*) PyArray_DATA(ary_qcounts);
    qmags = (int32_t*) PyArray_DATA(ary_qmags);
    qlengths = (int32_t*) PyArray_DATA(ary_qlengths);
    /* Get dimensions of arrays (# molecules, maxlingos) */
    rldims = PyArray_DIMS(ary_reflingos);
    rlstrides = PyArray_STRIDES(ary_reflingos);
    rcdims = PyArray_DIMS(ary_refcounts);
    rcstrides = PyArray_STRIDES(ary_refcounts);
    qldims = PyArray_DIMS(ary_qlingos);
    qlstrides = PyArray_STRIDES(ary_qlingos);
    qcdims = PyArray_DIMS(ary_qcounts);
    qcstrides = PyArray_STRIDES(ary_qcounts);
    // Do some sanity checking on array dimensions {{{
    // - make sure they are of int32 data type
    CHECKARRAYTYPE(ary_reflingos,"Reference Lingo matrix");
    CHECKARRAYTYPE(ary_refcounts,"Reference Lingo count matrix");
    CHECKARRAYTYPE(ary_refmags,"Reference magnitude vector");
    CHECKARRAYTYPE(ary_reflengths,"Reference length vector");
    CHECKARRAYTYPE(ary_qlingos,"Query Lingo matrix");
    CHECKARRAYTYPE(ary_qcounts,"Query Lingo count matrix");
    CHECKARRAYTYPE(ary_qmags,"Query magnitude vector");
    CHECKARRAYTYPE(ary_qlengths,"Query length vector");
    // - make sure lingo/count/mag arrays are 2d and are the same size in a set (ref/q)
    if (ary_reflingos->nd != 2) {
        PyErr_SetString(PyExc_TypeError,"Reference Lingo matrix did not have dimension 2");
        return NULL;
    }
    if (ary_refcounts->nd != 2) {
        PyErr_SetString(PyExc_TypeError,"Reference Lingo count matrix did not have dimension 2");
        return NULL;
    }
    if (rldims[0] != rcdims[0] || rldims[1] != rcdims[1]) {
        PyErr_SetString(PyExc_TypeError,"Reference Lingo and Lingo count matrix did not have identical shapes");
        return NULL;
    }
    if (ary_qlingos->nd != 2) {
        PyErr_SetString(PyExc_TypeError,"Query Lingo matrix did not have dimension 2");
        return NULL;
    }
    if (ary_qcounts->nd != 2) {
        PyErr_SetString(PyExc_TypeError,"Query Lingo count matrix did not have dimension 2");
        return NULL;
    }
    if (qldims[0] != qcdims[0] || qldims[1] != qcdims[1]) {
        PyErr_SetString(PyExc_TypeError,"Query Lingo and Lingo count matrix did not have identical shapes");
        return NULL;
    }
    // - make sure stride is 4 in last dimension (ie, is C-style and contiguous)
    CHECKARRAYCARRAY(ary_reflingos,"Reference Lingo matrix");
    CHECKARRAYCARRAY(ary_refcounts,"Reference Lingo count matrix");
    CHECKARRAYCARRAY(ary_refmags,"Reference magnitude vector");
    CHECKARRAYCARRAY(ary_reflengths,"Reference length vector");
    CHECKARRAYCARRAY(ary_qlingos,"Query Lingo matrix");
    CHECKARRAYCARRAY(ary_qcounts,"Query Lingo count matrix");
    CHECKARRAYCARRAY(ary_qmags,"Query magnitude vector");
    CHECKARRAYCARRAY(ary_qlengths,"Query length vector");
    // - make sure lengths/mags are 1d or (Nx1) and have same length as #rows of l/c arrays
    if (!(ary_reflengths->nd == 1 || (ary_reflengths->nd == 2 && ary_reflengths->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_TypeError,"Reference length vector was not 1-D");
        return NULL;
    }
    if (ary_reflengths->dimensions[0] != rldims[0]) {
        PyErr_SetString(PyExc_TypeError,"Reference length vector length did not equal number of rows of reference Lingo matrix");
        return NULL;
    }
    if (!(ary_refmags->nd == 1 || (ary_refmags->nd == 2 && ary_refmags->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_TypeError,"Reference magnitude vector was not 1-D");
        return NULL;
    }
    if (ary_refmags->dimensions[0] != rldims[0]) {
        PyErr_SetString(PyExc_TypeError,"Reference magnitude vector length did not equal number of rows of reference Lingo matrix");
        return NULL;
    }
    if (!(ary_qlengths->nd == 1 || (ary_qlengths->nd == 2 && ary_qlengths->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_TypeError,"Query length vector was not 1-D");
        return NULL;
    }
    if (ary_qlengths->dimensions[0] != qldims[0]) {
        PyErr_SetString(PyExc_TypeError,"Query length vector length did not equal number of rows of query Lingo matrix");
        return NULL;
    }
    if (!(ary_qmags->nd == 1 || (ary_qmags->nd == 2 && ary_qmags->dimensions[1] == 1))) {
        PyErr_SetString(PyExc_TypeError,"Query magnitude vector was not 1-D");
        return NULL;
    }
    if (ary_qmags->dimensions[0] != qldims[0]) {
        PyErr_SetString(PyExc_TypeError,"Query magnitude vector length did not equal number of rows of query Lingo matrix");
        return NULL;
    }
    // }}}
    nrefmols = rldims[0];
    nqmols = qldims[0];
    // Create return array containing Tanimotos
    dim2[0] = nrefmols;
    dim2[1] = nqmols;
    ary_tanimotos = (PyArrayObject*) PyArray_SimpleNew(2,dim2,NPY_FLOAT);
    if (ary_tanimotos == NULL)
        return NULL;  /* allocation failed; numpy has already set the exception */
    tanimotos = (float*) PyArray_DATA(ary_tanimotos);
    tan_strides = PyArray_STRIDES(ary_tanimotos);
    // Fill this array with Tanimotos, parallelized over rows
    if (nprocs > 0) omp_set_num_threads(nprocs);
    #pragma omp parallel for default(none) shared(nrefmols,nqmols,rlstrides,rcstrides,reflingos,refcounts,refmags,reflengths,tanimotos,tan_strides,qlingos,qlstrides,qcounts,qcstrides,qmags,qlengths) private(row,col,reflingoset,refcountset,refmag,reflength,qlingoset,qcountset,qmag,qlength,t,outputrow)
    for (row = 0; row < nrefmols; row++) {
        /* Strides are in bytes; elements are 4-byte int32/float32, hence /4. */
        reflingoset = reflingos + row*rlstrides[0]/4;
        refcountset = refcounts + row*rcstrides[0]/4;
        refmag = refmags[row];
        reflength = reflengths[row];
        outputrow = tanimotos + row*tan_strides[0]/4;
        for (col = 0; col < nqmols; col++) {
            qlingoset = qlingos + col*qlstrides[0]/4;
            qcountset = qcounts + col*qcstrides[0]/4;
            qmag = qmags[col];
            qlength = qlengths[col];
            t = multisetTanimoto3_mags(reflingoset,qlingoset,refcountset,qcountset,reflength,qlength,refmag,qmag);
            outputrow[col] = t;
        }
    }
    return PyArray_Return(ary_tanimotos);
}
/* supportsParallel() -> True.  This build has OpenMP support.
   Py_RETURN_TRUE increments the refcount of the True singleton; the
   original `return Py_True;` returned a borrowed reference, leaking a
   decrement on every call (Python/C API refcounting bug). */
static PyObject *_CPULingo_supportsParallel(PyObject *self, PyObject *args) {
    Py_RETURN_TRUE;
}
#else
/* supportsParallel() -> False.  This build lacks OpenMP support.
   Py_RETURN_FALSE increments the refcount of the False singleton; the
   original `return Py_False;` returned a borrowed reference, leaking a
   decrement on every call (Python/C API refcounting bug). */
static PyObject *_CPULingo_supportsParallel(PyObject *self, PyObject *args) {
    Py_RETURN_FALSE;
}
#endif
/* Module method table.  When built without OpenMP, the name
   "getTanimotoBlockParallel" is still exported but bound to the serial
   implementation, so callers can use one entry point unconditionally. */
static PyMethodDef _CPULingo_methods[] = {
    {"getTanimotoBlock", (PyCFunction)_CPULingo_getTanimotoBlock, METH_VARARGS, "Computes a block of Tanimotos using the sparse-vector SIML algorithm"},
    {"supportsParallel", (PyCFunction)_CPULingo_supportsParallel, METH_VARARGS, "Returns True if pySIML was built with OpenMP support"},
#ifdef USE_OPENMP
    {"getTanimotoBlockParallel", (PyCFunction)_CPULingo_getTanimotoBlockParallel, METH_VARARGS, "Computes a block of Tanimotos using the sparse-vector SIML algorithm, parallelized over rows"},
#else
    {"getTanimotoBlockParallel", (PyCFunction)_CPULingo_getTanimotoBlock, METH_VARARGS, "Computes a block of Tanimotos using the sparse-vector SIML algorithm (warning: pysiml built without OpenMP support, this function is not parallelized)"},
#endif
    {NULL, NULL, 0, NULL}  /* sentinel */
};
/* Python 2 module entry point for `import _CPULingo`.
   import_array() initializes the NumPy C API and must run before any
   PyArray_* call in this module (per NumPy C-API docs). */
DL_EXPORT(void) init_CPULingo(void)
{
    Py_InitModule3("_CPULingo", _CPULingo_methods, "Computes LINGO Tanimotos using the SIML method\n");
    import_array();
}
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2014 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
   know how big it is.  This is sanity-checked in c-decl.c.  */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
  (sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only.  */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile.  */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
   nonzero if the definition of the type has already started.  */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
   declarations whose type would be completed by completing that type.  */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
   keyword.  C_RID_CODE (node) is then the RID_* value of the keyword,
   and C_RID_YYCODE is the token number wanted by Yacc.  */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
   Note that TYPE_SIZE may have simplified to a constant.
   NOTE(review): the parameter of C_DECL_VARIABLE_SIZE is named TYPE but
   this macro reads a DECL flag — it is applied to decls, not types.  */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
   This is used for -Wc++-compat.  */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
   return type.  */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration.  */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
   been declared.  */
#define C_DECL_DECLARED_BUILTIN(EXP) \
  DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
   built-in prototype and does not have a non-built-in prototype.  */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
  DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register.  This is strictly a
   front-end flag, whereas DECL_REGISTER is used for code generation;
   they may differ for structures with volatile fields.  */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
   unevaluated operand of sizeof / typeof / alignof.  This is only
   used for functions declared static but not defined, though outside
   sizeof and typeof it is set for other function decls as well.  */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
   #pragma omp threadprivate.  */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
   N.B. Could be simplified if all built-in decls had complete prototypes
   (but this is presently difficult because some of them need FILE*).  */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
  (EXP == 0 \
   || (!prototype_p (TREE_TYPE (EXP)) \
       && !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments.  The same as
   TYPE_ARG_TYPES for functions with prototypes, but created for functions
   without prototypes.  */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
   subexpression meaning it is not a constant expression.  */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.
   This is the value type passed through most of the expression-parsing
   interfaces declared later in this header (parser_build_binary_op,
   convert_lvalue_to_rvalue, etc.).  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original unary/binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
     (even if parenthesized), for subexpressions, and for non-constant
     initializers, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
  /* If not NULL, the original type of an expression.  This will
     differ from the type of the value field for an enum constant.
     The type of an enum constant is a plain integer type, but this
     field will be the enum type.  */
  tree original_type;
};
/* Type alias for struct c_expr.  This allows the structure to be used
   inside the VEC types.  */
typedef struct c_expr c_expr_t;
/* A kind of type specifier.  Note that this information is currently
   only used to distinguish tag definitions, tag references and typeof
   uses.  */
enum c_typespec_kind {
  /* No typespec.  This appears only in struct c_declspecs.  */
  ctsk_none,
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope (e.g. "struct foo" where no declaration of foo is in
     scope).  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier, or _Atomic ( type-name ).  */
  ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is.  */
  enum c_typespec_kind kind;
  /* Whether the expression has operands suitable for use in constant
     expressions.  Mirrors the expr_const_operands bit accumulated in
     struct c_declspecs.  */
  bool expr_const_operands;
  /* The specifier itself.  */
  tree spec;
  /* An expression to be evaluated before the type specifier, in the
     case of typeof specifiers, or NULL otherwise or if no such
     expression is required for a particular typeof specifier.  In
     particular, when typeof is applied to an expression of variably
     modified type, that expression must be evaluated in order to
     determine array sizes that form part of the type, but the
     expression itself (as opposed to the array sizes) forms no part
     of the type and so needs to be recorded separately.  */
  tree expr;
};
/* A storage class specifier.  csc_none means no storage class
   specifier was given (see struct c_declspecs.storage_class).  */
enum c_storage_class {
  csc_none,
  csc_auto,
  csc_extern,
  csc_register,
  csc_static,
  csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
   "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
   or none of these.  (The enumerators below also cover __int128 and
   __auto_type, which the list above omits.)  */
enum c_typespec_keyword {
  cts_none,
  cts_void,
  cts_bool,
  cts_char,
  cts_int,
  cts_float,
  cts_int128,
  cts_double,
  cts_dfloat32,
  cts_dfloat64,
  cts_dfloat128,
  cts_fract,
  cts_accum,
  cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
   class or attribute that a user can write.  There is at least one
   enumerator per possible declarator specifier in the struct
   c_declspecs below.
   It is used to index the array of declspec locations in struct
   c_declspecs (the `locations' member, sized by
   cdw_number_of_elements).  */
enum c_declspec_word {
  cdw_typespec /* A catch-all for a typespec.  */,
  cdw_storage_class /* A catch-all for a storage class */,
  cdw_attributes,
  cdw_typedef,
  cdw_explicit_signed,
  cdw_deprecated,
  cdw_default_int,
  cdw_long,
  cdw_long_long,
  cdw_short,
  cdw_signed,
  cdw_unsigned,
  cdw_complex,
  cdw_inline,
  cdw_noreturn,
  cdw_thread,
  cdw_const,
  cdw_volatile,
  cdw_restrict,
  cdw_saturating,
  cdw_alignas,
  cdw_address_space,
  cdw_number_of_elements /* This one must always be the last
			    enumerator.  */
};
/* A sequence of declaration specifiers in C.  When a new declaration
   specifier is added, please update the enum c_declspec_word above
   accordingly.  */
struct c_declspecs {
  /* Source location of each specifier, indexed by enum c_declspec_word.  */
  source_location locations[cdw_number_of_elements];
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* Any expression to be evaluated before the type, from a typeof
     specifier.  */
  tree expr;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* The base-2 log of the greatest alignment required by an _Alignas
     specifier, in bytes, or -1 if no such specifiers with nonzero
     alignment.  */
  int align_log;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
  /* The kind of type specifier if one has been seen, ctsk_none
     otherwise.  */
  ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
  /* Whether any expressions in typeof specifiers may appear in
     constant expressions.  */
  BOOL_BITFIELD expr_const_operands : 1;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "_Noreturn" was specified.  */
  BOOL_BITFIELD noreturn_p : 1;
  /* Whether "__thread" or "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "__thread" rather than "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_gnu_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Atomic" was specified.  */
  BOOL_BITFIELD atomic_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* Whether any alignment specifier (even with zero alignment) was
     specified.  */
  BOOL_BITFIELD alignas_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};
/* The various kinds of declarators in C.  Discriminates the `u' union
   in struct c_declarator below.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};
/* NOTE(review): despite the "argument" wording below, this record is
   used for struct/union/enum tags defined among a function's
   parameters (see c_arg_info.tags) — `id' is presumably the tag
   identifier and `type' the tag's type; confirm against c-decl.c.  */
typedef struct c_arg_tag_d {
  /* The argument name.  */
  tree id;
  /* The type of the argument.  */
  tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator.
   Constructed by build_arg_info / get_parm_info (declared below).  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  */
  vec<c_arg_tag, va_gc> *tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A compound expression of VLA sizes from the parameters, or NULL.
     In a function definition, these are used to ensure that
     side-effects in sizes of arrays converted to pointers (such as a
     parameter int i[n++]) take place; otherwise, they are
     ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator.  Which member of the `u' union is valid depends on
   `kind' (see enum c_declarator_kind above).  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};
/* A type name, in the C-grammar sense (as written in casts, sizeof,
   compound literals, etc.): declaration specifiers plus an abstract
   declarator.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* A parameter.  Turned into a decl by grokparm / push_parm_decl
   (declared below).  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* Used when parsing an enum.  Initialized by start_enum and updated
   by build_enumerator (both declared below).  */
struct c_enum_contents
{
  /* While defining an enum type, this is 1 plus the last enumerator
     constant value.  */
  tree enum_next_value;
  /* Nonzero means that there was overflow computing enum_next_value.  */
  int enum_overflow;
};
/* A type of reference to a static identifier in an inline
   function.  Passed to record_inline_static (declared below).  */
enum c_inline_static_type {
  /* Identifier with internal linkage used in function that may be an
     inline definition (i.e., file-scope static).  */
  csi_internal,
  /* Modifiable object with static storage duration defined in
     function that may be an inline definition (i.e., local
     static).  */
  csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (struct obstack *);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Mode used to build pointers (VOIDmode means ptr_mode). */
extern enum machine_mode c_default_pointer_mode;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
phase.c | /*
* Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <memory.h>
#include <hashTableC.h>
#include <unistd.h>
#include <time.h>
#include "marginVersion.h"
#include "margin.h"
#include "htsIntegration.h"
#include "helenFeatures.h"
/*
* Main functions
*/
void phase_usage() {
    /* Print the `margin phase` command-line help text to stderr.
     * Constant lines go through fputs(); only the version line needs
     * printf-style formatting. Output is byte-identical to before. */
    fputs("usage: margin phase <ALIGN_BAM> <REFERENCE_FASTA> <VARIANT_VCF> <PARAMS> [options]\n", stderr);
    fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H);
    fputs("Tags reads in ALIGN_BAM and phases variants in VARIANT_VCF.\n", stderr);
    fputs("\nRequired arguments:\n", stderr);
    fputs(" ALIGN_BAM is the alignment of reads to the reference.\n", stderr);
    fputs(" REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n", stderr);
    fputs(" VARIANT_VCF is the set of variants to use for phasing.\n", stderr);
    fputs(" PARAMS is the file with margin parameters.\n", stderr);
    fputs("\nDefault options:\n", stderr);
    fputs(" -h --help : Print this help screen\n", stderr);
    fputs(" -a --logLevel : Set the log level [default = info]\n", stderr);
# ifdef _OPENMP
    fputs(" -t --threads : Set number of concurrent threads [default = 1]\n", stderr);
#endif
    fputs(" -o --outputBase : Name to use for output files [default = 'output']\n", stderr);
    fputs(" -r --region : If set, will only compute for given chromosomal region\n", stderr);
    fputs(" Format: chr:start_pos-end_pos (chr3:2000-3000)\n", stderr);
    fputs(" -p --depth : Will override the downsampling depth set in PARAMS\n", stderr);
    fputs(" -k --tempFilesToDisk : Write temporary files to disk (for --diploid or supplementary output)\n", stderr);
    fputs("\nOutput options:\n", stderr);
    fputs(" -M --skipHaplotypeBAM : Do not write out phased BAM\n", stderr);
    fputs(" -v --phasePrimaryVariantsOnly : Skip step where filtered variants are phased using read haplotypes\n", stderr);
    fputs(" -V --skipPhasedVCF : Do not write out phased VCF\n", stderr);
    fputs("\n", stderr);
}
int phase_main(int argc, char *argv[]) {
// Parameters / arguments
char *logLevelString = stString_copy("critical");
char *bamInFile = NULL;
char *paramsFile = NULL;
char *referenceFastaFile = NULL;
char *outputBase = stString_copy("output");
char *regionStr = NULL;
char *vcfFile = NULL;
int numThreads = 1;
int64_t maxDepth = -1;
bool inMemory = TRUE;
bool shouldOutputHaplotaggedBam = TRUE;
bool shouldOutputPhasedVcf = TRUE;
bool phasePrimaryVariantsOnly = FALSE;
if (argc < 4) {
free(outputBase);
free(logLevelString);
phase_usage();
return 0;
}
bamInFile = stString_copy(argv[1]);
referenceFastaFile = stString_copy(argv[2]);
vcfFile = stString_copy(argv[3]);
paramsFile = stString_copy(argv[4]);
// Parse the options
while (1) {
static struct option long_options[] = {
{ "help", no_argument, 0, 'h' },
{ "logLevel", required_argument, 0, 'a' },
# ifdef _OPENMP
{ "threads", required_argument, 0, 't'},
#endif
{ "outputBase", required_argument, 0, 'o'},
{ "region", required_argument, 0, 'r'},
{ "depth", required_argument, 0, 'p'},
{ "tempFilesToDisk", no_argument, 0, 'k'},
{ "skipHaplotypeBAM", no_argument, 0, 'M'},
{ "phasePrimaryVariantsOnly", no_argument, 0, 'v'},
{ "skipPhasedVCF", no_argument, 0, 'V'},
{ 0, 0, 0, 0 } };
int option_index = 0;
int key = getopt_long(argc-2, &argv[2], "ha:o:p:t:r:kMvV", long_options, &option_index);
if (key == -1) {
break;
}
switch (key) {
case 'a':
free(logLevelString);
logLevelString = stString_copy(optarg);
break;
case 'h':
phase_usage();
return 0;
case 'o':
free(outputBase);
outputBase = getFileBase(optarg, "output");
break;
case 'r':
regionStr = stString_copy(optarg);
break;
case 'p':
maxDepth = atoi(optarg);
if (maxDepth < 0) {
st_errAbort("Invalid maxDepth: %s", optarg);
}
break;
case 't':
numThreads = atoi(optarg);
if (numThreads <= 0) {
st_errAbort("Invalid thread count: %d", numThreads);
}
break;
case 'k':
inMemory = FALSE;
break;
case 'M':
shouldOutputHaplotaggedBam = FALSE;
break;
case 'V':
shouldOutputPhasedVcf = FALSE;
phasePrimaryVariantsOnly = TRUE;
break;
case 'v':
phasePrimaryVariantsOnly = TRUE;
break;
default:
phase_usage();
free(outputBase);
free(logLevelString);
free(bamInFile);
free(referenceFastaFile);
free(paramsFile);
return 0;
}
}
// sanity check (conflicting params)
if (!shouldOutputHaplotaggedBam && !shouldOutputPhasedVcf) {
st_errAbort("With --skipHaplotypeBAM and --skipPhasedVCF there will be no output.\n");
}
// sanity check (verify files exist)
if (access(bamInFile, R_OK) != 0) {
st_errAbort("Could not read from input bam file: %s\n", bamInFile);
char *idx = stString_print("%s.bai", bamInFile);
if (access(idx, R_OK) != 0) {
st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile);
}
free(idx);
}
if (access(referenceFastaFile, R_OK) != 0) {
st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile);
}
if (access(vcfFile, R_OK) != 0) {
st_errAbort("Could not read from vcf file: %s\n", vcfFile);
}
if (access(paramsFile, R_OK) != 0) {
st_errAbort("Could not read from params file: %s\n", paramsFile);
}
// Initialization from arguments
time_t startTime = time(NULL);
st_setLogLevelFromString(logLevelString);
free(logLevelString);
if (st_getLogLevel() >= info) {
st_setCallocDebug(true);
}
# ifdef _OPENMP
if (numThreads <= 0) {
numThreads = 1;
}
omp_set_num_threads(numThreads);
st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads());
# endif
// Parse parameters
st_logCritical("> Parsing model parameters from file: %s\n", paramsFile);
Params *params = params_readParams(paramsFile);
// update depth (if set)
if (maxDepth >= 0) {
st_logCritical("> Changing maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth,
maxDepth);
params->polishParams->maxDepth = (uint64_t) maxDepth;
}
// Print a report of the parsed parameters
if (st_getLogLevel() == debug) {
params_printParameters(params, stderr);
}
// get vcf entries (if set)
stHash *vcfEntries = NULL;
if (vcfFile != NULL) {
vcfEntries = parseVcf2(vcfFile, regionStr, params);
}
// get valid contigs (to help bam chunker construction)
stList *vcfContigsTmp = stHash_getKeys(vcfEntries);
stSet *vcfContigs = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
for (int64_t i = 0; i < stList_length(vcfContigsTmp); i++) {
stSet_insert(vcfContigs, stList_get(vcfContigsTmp, i));
}
// get chunker for bam. if regionStr is NULL, it will be ignored
time_t chunkingStart = time(NULL);
BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, vcfContigs, params->polishParams, TRUE);
char *regionStrInformative = regionStr != NULL ? stString_copy(regionStr) : stString_join2(",", vcfContigsTmp);
st_logCritical(
"> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n",
time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary,
regionStrInformative, bamChunker->chunkCount);
if (bamChunker->chunkCount == 0) {
st_errAbort("> Found no valid reads!\n");
}
free(regionStrInformative);
stList_destruct(vcfContigsTmp);
stSet_destruct(vcfContigs);
// print chunk info
char *outputChunksFile = stString_print("%s.chunks.csv", outputBase);
FILE *chunksOut = safe_fopen(outputChunksFile, "w");
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
BamChunk *c = stList_get(bamChunker->chunks, i);
fprintf(chunksOut, "%s,%"PRId64",%"PRId64",%"PRId64",%"PRId64"\n", c->refSeqName, c->chunkOverlapStart,
c->chunkOverlapEnd, c->chunkStart, c->chunkEnd);
}
fclose(chunksOut);
free(outputChunksFile);
// output chunker tracks intermediate output files
OutputChunkers *outputChunkers = outputChunkers_construct(numThreads, params, NULL, NULL, NULL, NULL,
".hap1", ".hap2", inMemory);
// (may) need to shuffle chunks
stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct);
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
stList_append(chunkOrder, stIntTuple_construct1(i));
}
if (params->polishParams->shuffleChunks) {
switch (params->polishParams->shuffleChunksMethod) {
case SCM_SIZE_DESC:
st_logCritical("> Ordering chunks by estimated depth\n");
stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks);
stList_reverse(chunkOrder);
break;
case SCM_RANDOM:
st_logCritical("> Randomly shuffling chunks\n");
stList_shuffle(chunkOrder);
break;
}
}
// multiproccess the chunks, save to results
st_logCritical("> Setup complete, beginning run\n");
int64_t lastReportedPercentage = 0;
time_t polishStartTime = time(NULL);
# ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
# endif
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0);
// Time all chunks
time_t chunkStartTime = time(NULL);
// Get chunk
BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx);
// logging
char *logIdentifier;
bool logProgress = FALSE;
int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount);
# ifdef _OPENMP
int64_t threadIdx = omp_get_thread_num();
logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx);
if (threadIdx == 0) {
if (currentPercentage != lastReportedPercentage) {
logProgress = TRUE;
lastReportedPercentage = currentPercentage;
}
}
# else
int64_t threadIdx = 0;
logIdentifier = stString_copy("");
if (currentPercentage != lastReportedPercentage) {
logProgress = TRUE;
lastReportedPercentage = currentPercentage;
}
# endif
// prints percentage complete and estimated time remaining
if (logProgress) {
// log progress
int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime);
int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage));
char *timeElapsedDescriptor = getTimeDescriptorFromSeconds(timeTaken);
char *timeLeftDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ?
stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining));
st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64", %s). Estimated time remaining: %s\n",
currentPercentage, i, bamChunker->chunkCount, timeElapsedDescriptor, timeLeftDescriptor);
free(timeLeftDescriptor);
free(timeElapsedDescriptor);
}
// Get reference string for chunk of alignment
char *chunkReference = getSequenceFromReference(referenceFastaFile, bamChunk->refSeqName,
bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n",
logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
// get VCF string
stList *chunkVcfEntries = stList_construct3(0, (void(*)(void*))vcfEntry_destruct);
stList *filteredChunkVcfEntries = stList_construct3(0, (void(*)(void*))vcfEntry_destruct);
getVcfEntriesForRegion(vcfEntries, chunkVcfEntries, filteredChunkVcfEntries, NULL,
bamChunk->refSeqName, bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd, params);
// get alleles and read substrings for all vcf entries and a unified set of bcrs:
// we do main phasing with some and then phase the filtered var with same reads
// update vcf alleles
updateVcfEntriesWithSubstringsAndPositions(chunkVcfEntries, chunkReference, strlen(chunkReference),
FALSE, params);
if (!phasePrimaryVariantsOnly) {
updateVcfEntriesWithSubstringsAndPositions(filteredChunkVcfEntries, chunkReference, strlen(chunkReference),
FALSE, params);
}
// Convert bam lines into corresponding reads and alignments
st_logInfo(" %s Parsing input reads from file: %s\n", logIdentifier, bamInFile);
stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *readsForFilteredVcfEntries = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *filteredReadsForFilteredVcfEntries = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
extractReadSubstringsAtVariantPositions(bamChunk, chunkVcfEntries, reads, filteredReads, params);
if (!phasePrimaryVariantsOnly) {
extractReadSubstringsAtVariantPositions(bamChunk, filteredChunkVcfEntries, readsForFilteredVcfEntries,
filteredReadsForFilteredVcfEntries, params);
}
// do downsampling if appropriate
if (params->polishParams->maxDepth > 0) {
// get downsampling structures
stList *maintainedReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
bool didDownsample = downsampleBamChunkReadWithVcfEntrySubstringsViaFullReadLengthLikelihood(
params->polishParams->maxDepth, chunkVcfEntries, reads, maintainedReads, filteredReads);
// we need to destroy the discarded reads and structures
if (didDownsample) {
st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier,
stList_length(reads), stList_length(maintainedReads));
// still has all the old reads, need to not free these
stList_setDestructor(reads, NULL);
stList_destruct(reads);
// and keep the filtered reads
reads = maintainedReads;
}
// no downsampling, we just need to free the (empty) objects
else {
assert(stList_length(maintainedReads) == 0);
stList_destruct(maintainedReads);
}
}
time_t primaryPhasingStart = time(NULL);
// iteratively find bubbles
BubbleGraph *bg = NULL;
stHash *readsToPSeqs = NULL;
stSet *readsBelongingToHap1 = NULL, *readsBelongingToHap2 = NULL;
stGenomeFragment *gf = NULL;
stReference *ref = NULL;
stList *vcfEntriesToBubbles = NULL;
// Get the bubble graph representation
bg = bubbleGraph_constructFromVCFAndBamChunkReadVcfEntrySubstrings(reads, chunkVcfEntries, params,
&vcfEntriesToBubbles);
// Now make a POA for each of the haplotypes and phase
ref = bubbleGraph_getReference(bg, bamChunk->refSeqName, params);
gf = bubbleGraph_phaseBubbleGraph(bg, ref, reads, params, &readsToPSeqs);
stGenomeFragment_phaseBamChunkReads(gf, readsToPSeqs, reads, &readsBelongingToHap1, &readsBelongingToHap2,
params->phaseParams);
st_logInfo(" %s After phasing, of %i reads got %i reads partitioned into hap1 and %i reads partitioned "
"into hap2 (%i unphased)\n", logIdentifier, (int) stList_length(reads),
(int) stSet_size(readsBelongingToHap1), (int) stSet_size(readsBelongingToHap2),
(int) (stList_length(reads) - stSet_size(readsBelongingToHap1) -
stSet_size(readsBelongingToHap2)));
st_logInfo(" %s Phased primary reads in %d sec\n", logIdentifier, time(NULL) - primaryPhasingStart);
// phase filtered variants (if we're generating a VCF)
if (!phasePrimaryVariantsOnly) {
bubbleGraph_phaseVcfEntriesFromHaplotaggedReads(readsForFilteredVcfEntries, filteredChunkVcfEntries,
readsBelongingToHap1, readsBelongingToHap2, bamChunk,
bamChunker->readEnumerator, params);
}
// assign filtered reads to haplotypes
for (int64_t bcrIdx = 0; bcrIdx < stList_length(reads); bcrIdx++) {
BamChunkRead *bcr = stList_get(reads, bcrIdx);
if (!stSet_search(readsBelongingToHap1, bcr) && !stSet_search(readsBelongingToHap2, bcr)) {
// was filtered in some form
stList_append(filteredReads, bamChunkRead_constructCopy(bcr));
}
}
st_logInfo(" %s Assigning %"PRId64" filtered reads to haplotypes\n", logIdentifier,
stList_length(filteredReads));
time_t filteredPhasingStart = time(NULL);
bubbleGraph_partitionFilteredReadsFromVcfEntries(filteredReads, gf, bg, vcfEntriesToBubbles,
readsBelongingToHap1,
readsBelongingToHap2, params, logIdentifier);
st_logInfo(" %s Partitioned filtered reads in %d sec.\n", logIdentifier, time(NULL) - filteredPhasingStart);
// Output
outputChunkers_processChunkSequencePhased(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName,
NULL, NULL, reads, readsBelongingToHap1, readsBelongingToHap2, gf,
params);
// save
// only use primary reads (not filteredReads) to track read phasing
updateOriginalVcfEntriesWithBubbleData(bamChunk, reads, bamChunker->readEnumerator, gf, bg,
vcfEntriesToBubbles, readsBelongingToHap1, readsBelongingToHap2, logIdentifier);
// Cleanup
stList_destruct(chunkVcfEntries);
stList_destruct(filteredChunkVcfEntries);
stSet_destruct(readsBelongingToHap1);
stSet_destruct(readsBelongingToHap2);
bubbleGraph_destruct(bg);
stGenomeFragment_destruct(gf);
stReference_destruct(ref);
stHash_destruct(readsToPSeqs);
stList_destruct(vcfEntriesToBubbles);
free(chunkReference);
// report timing
if (st_getLogLevel() >= info) {
st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n",
logIdentifier, stList_length(reads) + stList_length(filteredReads), (int) (time(NULL) - chunkStartTime));
}
// final post-completion logging cleanup
stList_destruct(reads);
stList_destruct(filteredReads);
stList_destruct(readsForFilteredVcfEntries);
stList_destruct(filteredReadsForFilteredVcfEntries);
free(logIdentifier);
}
// for writing haplotyped chunks
stList *allReadIdsHap1 = stList_construct3(0, free);
stList *allReadIdsHap2 = stList_construct3(0, free);
// for writing vcf
bool *chunkWasSwitched = st_calloc(bamChunker->chunkCount, sizeof(bool));
// merge chunks
time_t mergeStartTime = time(NULL);
st_logCritical("> Starting merge\n");
outputChunkers_stitchAndTrackExtraData(outputChunkers, TRUE, bamChunker->chunkCount, allReadIdsHap1, allReadIdsHap2,
chunkWasSwitched);
time_t mergeEndTime = time(NULL);
char *tds = getTimeDescriptorFromSeconds((int) mergeEndTime - mergeStartTime);
st_logCritical(" Merging took %s\n", tds);
outputChunkers_destruct(outputChunkers);
free(tds);
tds = getTimeDescriptorFromSeconds((int) time(NULL) - mergeEndTime);
st_logInfo("> Merge cleanup took %s\n", tds);
free(tds);
// maybe write final haplotyped bams
if (shouldOutputHaplotaggedBam) {
// logging
time_t hapBamStart = time(NULL);
// get all reads
stSet *allReadIdsForHaplotypingHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
stSet *allReadIdsForHaplotypingHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
for (int64_t i = 0; i < stList_length(allReadIdsHap1); i++) {
stSet_insert(allReadIdsForHaplotypingHap1, stList_get(allReadIdsHap1, i));
}
for (int64_t i = 0; i < stList_length(allReadIdsHap2); i++) {
stSet_insert(allReadIdsForHaplotypingHap2, stList_get(allReadIdsHap2, i));
}
// write it
writeHaplotaggedBam(bamChunker->bamFile, outputBase, regionStr,
allReadIdsForHaplotypingHap1, allReadIdsForHaplotypingHap2, NULL, params, "");
// loggit
char *hapBamTDS = getTimeDescriptorFromSeconds(time(NULL) - hapBamStart);
st_logCritical("> Wrote haplotagged BAM in %s\n", hapBamTDS);
// cleanup
free(hapBamTDS);
stSet_destruct(allReadIdsForHaplotypingHap1);
stSet_destruct(allReadIdsForHaplotypingHap2);
}
// maybe write VCF
if (shouldOutputPhasedVcf) {
// loggit
time_t vcfWriteStart = time(NULL);
char *outputVcfFile = stString_print("%s.phased.vcf", outputBase);
char *outputPhaseSetFile = stString_print("%s.phaseset.bed", outputBase);
st_logCritical("> Writing phased VCF to %s, phaseset info to %s\n", outputVcfFile, outputPhaseSetFile);
// write it
updateHaplotypeSwitchingInVcfEntries(bamChunker, chunkWasSwitched, vcfEntries);
writePhasedVcf(vcfFile, regionStr, outputVcfFile, outputPhaseSetFile, vcfEntries, params);
// loggit
char *phasedVcfTDS = getTimeDescriptorFromSeconds(time(NULL) - vcfWriteStart);
st_logCritical("> Wrote phased VCF in %s\n", phasedVcfTDS);
// cleanup
free(phasedVcfTDS);
free(outputVcfFile);
free(outputPhaseSetFile);
}
// cleanup
free(chunkWasSwitched);
bamChunker_destruct(bamChunker);
params_destruct(params);
if (regionStr != NULL) free(regionStr);
stList_destruct(chunkOrder);
free(vcfFile);
stHash_destruct(vcfEntries);
if (allReadIdsHap1 != NULL) stList_destruct(allReadIdsHap1);
if (allReadIdsHap2 != NULL) stList_destruct(allReadIdsHap2);
free(outputBase);
free(bamInFile);
free(referenceFastaFile);
free(paramsFile);
// log completion
char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime);
st_logCritical("> Finished phasing in %s.\n", timeDescriptor);
free(timeDescriptor);
// while(1); // Use this for testing for memory leaks
return 0;
}
|
matrix.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "matrix.h"
#include "util.h"
#include "timer.h"
#include "splatt_lapack.h"
#include <math.h>
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Form the Gram matrix for the normal equations: the Hadamard
*        product of the other modes' Gram matrices, with a regularized
*        diagonal.
*
* NOTE(review): because the diagonal is seeded with (1 + reg) BEFORE the
* Hadamard products, the regularization ends up multiplicative:
* (1+reg) * prod_m aTa[m][i][i]. If additive regularization (prod + reg)
* is intended, reg should instead be added after the product loop.
*
* @param[out] neq_matrix The matrix to fill (N x N, row-major).
* @param aTa The individual Gram matrices.
* @param mode Which mode we are computing for (skipped in the product).
* @param nmodes How many total modes.
* @param reg Regularization parameter (to add to the diagonal).
*/
static void p_form_gram(
    matrix_t * neq_matrix,
    matrix_t * * aTa,
    idx_t const mode,
    idx_t const nmodes,
    val_t const reg)
{
  /* nfactors */
  splatt_blas_int N = aTa[0]->J;

  /* form upper-triangular normal equations */
  val_t * const restrict neqs = neq_matrix->vals;

  #pragma omp parallel
  {
    /* Fill each row with 1s FIRST, then place the regularized diagonal.
     * BUG FIX: the original set the diagonal before the row fill, and the
     * fill (whose j sweep includes j == i) overwrote it, silently dropping
     * `reg` from every diagonal entry. */
    #pragma omp for schedule(static, 1)
    for(splatt_blas_int i=0; i < N; ++i) {
      for(splatt_blas_int j=0; j < N; ++j) {
        neqs[j+(i*N)] = 1.;
      }
      neqs[i+(i*N)] = 1. + reg;
    }

    /* now Hadamard product all (A^T * A) matrices */
    for(idx_t m=0; m < nmodes; ++m) {
      if(m == mode) {
        continue;
      }

      val_t const * const restrict mat = aTa[m]->vals;
      #pragma omp for schedule(static, 1)
      for(splatt_blas_int i=0; i < N; ++i) {
        /*
         * `mat` is symmetric but stored upper right triangular, so be careful
         * to only access that.
         */
        /* multiply into the upper triangle */
        for(splatt_blas_int j=i; j < N; ++j) {
          neqs[j+(i*N)] *= mat[j+(i*N)];
        }
      }
    } /* foreach mode */

    #pragma omp barrier

    /* now copy to the lower triangle */
    #pragma omp for schedule(static, 1)
    for(splatt_blas_int i=0; i < N; ++i) {
      for(splatt_blas_int j=0; j < i; ++j) {
        neqs[j+(i*N)] = neqs[i+(j*N)];
      }
    }
  } /* omp parallel */
}
/**
* @brief Normalize the columns of A by their 2-norms, storing the norms in
*        `lambda`. On return lambda[j] = ||A(:,j)||_2 and each column of A
*        has been divided by its norm.
*
* NOTE(review): an all-zero column yields lambda[j] == 0 and a division by
* zero in the final loop -- presumably callers guarantee nonzero columns;
* confirm.
*
* @param A The matrix to normalize (row-major, I x J); modified in place.
* @param lambda Output array of length J receiving the column 2-norms.
* @param rinfo MPI rank info (used only when SPLATT_USE_MPI is defined).
* @param thds Per-thread scratch; scratch[0] must hold at least J val_t's.
*/
static void p_mat_2norm(
  matrix_t * const A,
  val_t * const restrict lambda,
  rank_info * const rinfo,
  thd_info * const thds)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread partial sums of squares, one slot per column */
    val_t * const mylambda = (val_t *) thds[tid].scratch[0];
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    /* accumulate squared column entries over this thread's rows */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] += vals[j + (i*J)] * vals[j + (i*J)];
      }
    }

    /* do reduction on partial sums */
    thd_reduce(thds, 0, J, REDUCE_SUM);

    /* master (thread 0) publishes the reduced sums into `lambda` */
    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      memcpy(lambda, mylambda, J * sizeof(val_t));
#endif
    }
    /* master has no implicit barrier; wait until lambda is filled */
    #pragma omp barrier

    /* turn sums of squares into 2-norms */
    #pragma omp for schedule(static)
    for(idx_t j=0; j < J; ++j) {
      lambda[j] = sqrt(lambda[j]);
    }

    /* do the normalization */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }
  } /* end omp parallel */
}
/**
* @brief Normalize the columns of A by their max-norms (clamped below at 1),
*        storing the norms in `lambda`. On return
*        lambda[j] = max(max_i A(i,j), 1) and each column of A has been
*        divided by it.
*
* NOTE(review): the max is taken over the raw (signed) values, not |A(i,j)|
* -- presumably the factor matrices are nonnegative here; confirm before
* using with signed data.
*
* @param A The matrix to normalize (row-major, I x J); modified in place.
* @param lambda Output array of length J receiving the column max-norms.
* @param rinfo MPI rank info (used only when SPLATT_USE_MPI is defined).
* @param thds Per-thread scratch; scratch[0] must hold at least J val_t's.
*/
static void p_mat_maxnorm(
  matrix_t * const A,
  val_t * const restrict lambda,
  rank_info * const rinfo,
  thd_info * const thds)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread partial maxima, one slot per column */
    val_t * const mylambda = (val_t *) thds[tid].scratch[0];
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    /* track the per-column max over this thread's rows */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] = SS_MAX(mylambda[j], vals[j+(i*J)]);
      }
    }

    /* do reduction on partial maxes */
    thd_reduce(thds, 0, J, REDUCE_MAX);

    /* master (thread 0) publishes the reduced maxima into `lambda` */
    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_MAX, rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      memcpy(lambda, mylambda, J * sizeof(val_t));
#endif
    }
    /* master has no implicit barrier; wait until lambda is filled */
    #pragma omp barrier

    /* clamp norms below at 1 so small columns are left unscaled */
    #pragma omp for schedule(static)
    for(idx_t j=0; j < J; ++j) {
      lambda[j] = SS_MAX(lambda[j], 1.);
    }

    /* do the normalization */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }
  } /* end omp parallel */
}
/**
* @brief Solve the triangular system L * X = B by forward substitution.
*
* @param L The lower triangular matrix of coefficients (N x N, row-major).
* @param B The right-hand side (N x N), overwritten with the solution X.
*/
static void p_mat_forwardsolve(
  matrix_t const * const L,
  matrix_t * const B)
{
  idx_t const N = L->I;
  val_t const * const restrict lower = L->vals;
  val_t * const restrict rhs = B->vals;

  /* Solve one row of X at a time, top down. Row 0 has no preceding rows,
   * so its elimination loop is empty and it is just divided by L(0,0). */
  for(idx_t row=0; row < N; ++row) {
    /* X(row,f) = B(row,f) - sum_{p < row} L(row,p) * X(p,f) */
    for(idx_t prev=0; prev < row; ++prev) {
      val_t const coef = lower[prev + (row*N)];
      for(idx_t f=0; f < N; ++f) {
        rhs[f + (row*N)] -= coef * rhs[f + (prev*N)];
      }
    }
    val_t const diag = lower[row + (row*N)];
    for(idx_t f=0; f < N; ++f) {
      rhs[f + (row*N)] /= diag;
    }
  }
}
/**
* @brief Solve the triangular system U * X = B by backward substitution.
*
* @param U The upper triangular matrix of coefficients (N x N, row-major).
* @param B The right-hand side (N x N), overwritten with the solution X.
*/
static void p_mat_backwardsolve(
  matrix_t const * const U,
  matrix_t * const B)
{
  idx_t const N = U->I;
  val_t const * const restrict upper = U->vals;
  val_t * const restrict rhs = B->vals;

  /* Walk the rows bottom-up; `i-- > 0` keeps the unsigned index
   * well-defined. Row N-1 has no trailing rows, so its elimination loop is
   * empty and it is just divided by U(N-1,N-1). */
  for(idx_t i = N; i-- > 0;) {
    /* X(i,f) = B(i,f) - sum_{j > i} U(i,j) * X(j,f) */
    for(idx_t j = i+1; j < N; ++j) {
      val_t const coef = upper[j + (i*N)];
      for(idx_t f=0; f < N; ++f) {
        rhs[f + (i*N)] -= coef * rhs[f + (j*N)];
      }
    }
    val_t const diag = upper[i + (i*N)];
    for(idx_t f=0; f < N; ++f) {
      rhs[f + (i*N)] /= diag;
    }
  }
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/**
* @brief Invert a symmetric positive-definite matrix in place via a
*        Cholesky factorization and two triangular solves:
*        A = L L^T, then solve L Y = I and L^T A = Y.
*
* @param A The symmetric matrix to invert (N x N); overwritten with A^-1.
*/
void mat_syminv(
  matrix_t * const A)
{
  /* check dimensions */
  assert(A->I == A->J);

  idx_t const N = A->I;
  matrix_t * chol = mat_alloc(N, N);

  /* factor A = L * L^T */
  mat_cholesky(A, chol);

  /* overwrite A with the identity */
  memset(A->vals, 0, N*N*sizeof(val_t));
  for(idx_t d=0; d < N; ++d) {
    A->vals[d + (d*N)] = 1.;
  }

  /* Solve L * Y = I */
  p_mat_forwardsolve(chol, A);

  /* transpose the factor in place: upper <- lower, lower zeroed */
  for(idx_t r=0; r < N; ++r) {
    for(idx_t c=r+1; c < N; ++c) {
      chol->vals[c + (r*N)] = chol->vals[r + (c*N)];
      chol->vals[r + (c*N)] = 0.;
    }
  }

  /* Solve L^T * A = Y */
  p_mat_backwardsolve(chol, A);

  mat_free(chol);
}
/**
* @brief Cholesky-factor a symmetric positive-definite matrix: A = L * L^T.
*
* NOTE: assumes A is positive definite; no check is made that the sqrt()
* argument stays nonnegative.
*
* @param A The matrix to factor (N x N, row-major); unmodified.
* @param[out] L Receives the lower-triangular factor (upper part zeroed).
*/
void mat_cholesky(
  matrix_t const * const A,
  matrix_t * const L)
{
  /* check dimensions */
  assert(A->I == A->J);
  assert(A->I == L->J);
  assert(L->I == L->J);

  idx_t const N = A->I;
  val_t const * const restrict src = A->vals;
  val_t * const restrict chol = L->vals;
  memset(chol, 0, N*N*sizeof(val_t));

  for (idx_t i = 0; i < N; ++i) {
    /* off-diagonal entries of row i: L(i,j) for j < i */
    for (idx_t j = 0; j < i; ++j) {
      val_t dot = 0;
      for (idx_t k = 0; k < j; ++k) {
        dot += chol[k+(i*N)] * chol[k+(j*N)];
      }
      chol[j+(i*N)] = 1.0 / chol[j+(j*N)] * (src[j+(i*N)] - dot);
    }

    /* diagonal entry L(i,i) */
    val_t dot = 0;
    for (idx_t k = 0; k < i; ++k) {
      dot += chol[k+(i*N)] * chol[k+(i*N)];
    }
    chol[i+(i*N)] = sqrt(src[i+(i*N)] - dot);
  }
}
/**
* @brief Compute the Hadamard product of several Gram matrices:
*        ret = hada_{m in window} (mats[m]^T * mats[m]), where the window is
*        `nmults` matrices starting at `start` (wrapping modulo `nmats`).
*        Work is done in the upper triangle and mirrored at the end.
*
* @param mats The factor matrices (row-major, all with F columns).
* @param start First matrix index in the product window.
* @param nmults How many matrices to multiply in.
* @param nmats Total number of matrices (for the modular wraparound).
* @param buf F x F scratch matrix.
* @param[out] ret F x F output matrix.
*/
void mat_aTa_hada(
  matrix_t ** mats,
  idx_t const start,
  idx_t const nmults,
  idx_t const nmats,
  matrix_t * const buf,
  matrix_t * const ret)
{
  idx_t const F = mats[0]->J;

  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == F);
  assert(buf->I == F);
  assert(buf->J == F);
  assert(ret->vals != NULL);
  assert(mats[0]->rowmajor);
  assert(ret->rowmajor);

  val_t * const restrict out = ret->vals;
  val_t * const restrict gram = buf->vals;

  /* seed the upper triangle with 1s so we can multiply into it */
  for(idx_t r=0; r < F; ++r) {
    for(idx_t c=r; c < F; ++c) {
      out[c + (r*F)] = 1.;
    }
  }

  for(idx_t step=0; step < nmults; ++step) {
    idx_t const m = (start + step) % nmats;
    idx_t const nrows = mats[m]->I;
    val_t const * const Av = mats[m]->vals;

    /* upper-triangular A^T * A for this factor */
    memset(gram, 0, F * F * sizeof(val_t));
    for(idx_t r=0; r < nrows; ++r) {
      for(idx_t mi=0; mi < F; ++mi) {
        for(idx_t mj=mi; mj < F; ++mj) {
          gram[mj + (mi*F)] += Av[mi + (r*F)] * Av[mj + (r*F)];
        }
      }
    }

    /* fold into the running Hadamard product */
    for(idx_t mi=0; mi < F; ++mi) {
      for(idx_t mj=mi; mj < F; ++mj) {
        out[mj + (mi*F)] *= gram[mj + (mi*F)];
      }
    }
  }

  /* mirror the result to the lower triangle */
  for(idx_t r=1; r < F; ++r) {
    for(idx_t c=0; c < r; ++c) {
      out[c + (r*F)] = out[r + (c*F)];
    }
  }
}
/*
 * Computes ret = A^T * A for a row-major I x F matrix A using BLAS SYRK.
 * Under MPI, the local products are summed across the 3D grid so every
 * rank ends up with the global Gram matrix.
 *
 * NOTE(review): `thds` and `nthreads` are unused here -- presumably kept
 * for interface parity with other mat_* routines; confirm before removal.
 */
void mat_aTa(
  matrix_t const * const A,
  matrix_t * const ret,
  rank_info * const rinfo,
  thd_info * const thds,
  idx_t const nthreads)
{
  timer_start(&timers[TIMER_ATA]);

  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == A->J);
  assert(ret->vals != NULL);
  assert(A->rowmajor);
  assert(ret->rowmajor);

  idx_t const I = A->I;
  idx_t const F = A->J;
  val_t const * const restrict Av = A->vals;

  char uplo = 'L';
  char trans = 'N'; /* actually do A * A' due to row-major ordering */
  /* BLAS sees the row-major buffer as a column-major F x I matrix, so
   * SYRK's (A * A') is our (A' * A) */
  splatt_blas_int N = (splatt_blas_int) F;
  splatt_blas_int K = (splatt_blas_int) I;
  splatt_blas_int lda = N;
  splatt_blas_int ldc = N;
  val_t alpha = 1.;
  val_t beta = 0.;

  /* NOTE(review): SYRK writes only one triangle of ret (uplo='L' in
   * column-major terms); downstream consumers must read the matching
   * triangle or symmetrize -- confirm against callers. */
  SPLATT_BLAS(syrk)(&uplo, &trans, &N, &K, &alpha, A->vals, &lda, &beta, ret->vals,
      &ldc);

#ifdef SPLATT_USE_MPI
  /* sum the per-rank partial Gram matrices */
  timer_start(&timers[TIMER_MPI_ATA]);
  timer_start(&timers[TIMER_MPI_COMM]);
  MPI_Allreduce(MPI_IN_PLACE, ret->vals, F * F, SPLATT_MPI_VAL, MPI_SUM,
      rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_COMM]);
  timer_stop(&timers[TIMER_MPI_ATA]);
#endif

  timer_stop(&timers[TIMER_ATA]);
}
/*
 * Dense matrix multiply C = A * B (all row-major). C->vals must have room
 * for at least A->I * B->J entries; C's dimensions are set on exit.
 *
 * FIX: the accumulation loop uses `+=` but the output buffer was never
 * zeroed, so the result depended on whatever data was left in C->vals
 * (splatt_malloc does not zero). Each output row is now cleared before
 * accumulation; rows are disjoint across threads, so this is race-free,
 * and it is a no-op for callers that already pre-zeroed C.
 */
void mat_matmul(
  matrix_t const * const A,
  matrix_t const * const B,
  matrix_t * const C)
{
  timer_start(&timers[TIMER_MATMUL]);

  /* check dimensions: inner sizes match, C has enough storage */
  assert(A->J == B->I);
  assert(C->I * C->J <= A->I * B->J);

  /* set output dimensions */
  C->I = A->I;
  C->J = B->J;

  val_t const * const restrict av = A->vals;
  val_t const * const restrict bv = B->vals;
  val_t * const restrict cv = C->vals;

  idx_t const M  = A->I;
  idx_t const N  = B->J;
  idx_t const Na = A->J;

  /* tiled matrix multiplication */
  idx_t const TILE = 16;
  #pragma omp parallel for schedule(static)
  for(idx_t i=0; i < M; ++i) {
    /* clear this output row before accumulating into it */
    memset(cv + (i*N), 0, N * sizeof(val_t));
    for(idx_t jt=0; jt < N; jt += TILE) {
      for(idx_t kt=0; kt < Na; kt += TILE) {
        idx_t const JSTOP = SS_MIN(jt+TILE, N);
        for(idx_t j=jt; j < JSTOP; ++j) {
          val_t accum = 0;
          idx_t const KSTOP = SS_MIN(kt+TILE, Na);
          for(idx_t k=kt; k < KSTOP; ++k) {
            accum += av[k + (i*Na)] * bv[j + (k*N)];
          }
          cv[j + (i*N)] += accum;
        }
      }
    }
  }

  timer_stop(&timers[TIMER_MATMUL]);
}
/*
 * Normalizes the columns of A with the requested norm, storing the
 * per-column norms in `lambda`. Supports MAT_NORM_2 and MAT_NORM_MAX;
 * any other value aborts with a diagnostic.
 */
void mat_normalize(
  matrix_t * const A,
  val_t * const restrict lambda,
  splatt_mat_norm const which,
  rank_info * const rinfo,
  thd_info * const thds,
  idx_t const nthreads)
{
  timer_start(&timers[TIMER_MATNORM]);
  splatt_omp_set_num_threads(nthreads);

  /* dispatch to the requested norm implementation */
  if(which == MAT_NORM_2) {
    p_mat_2norm(A, lambda, rinfo, thds);
  } else if(which == MAT_NORM_MAX) {
    p_mat_maxnorm(A, lambda, rinfo, thds);
  } else {
    fprintf(stderr, "SPLATT: mat_normalize supports 2 and MAX only.\n");
    abort();
  }

  timer_stop(&timers[TIMER_MATNORM]);
}
/*
 * Solves the normal equations for one mode of a CPD update:
 * forms the regularized Gram matrix in aTa[MAX_NMODES], then solves it
 * against the rows of `rhs` (solution overwrites rhs->vals).
 *
 * Tries a Cholesky solve (POTRF/POTRS) first; if the Gram matrix is not
 * SPD, the matrix is re-formed and an SVD-based least-squares solve
 * (GELSS) is used instead.
 *
 * FIX: `info` and `effective_rank` are splatt_blas_int, which need not be
 * `int`; printing them with "%d" was a format/argument mismatch (UB on
 * LP64 ILP64-BLAS builds). They are now cast to int. The DGELSS failure
 * message also goes to stderr, consistent with the DPOTRS path.
 */
void mat_solve_normals(
  idx_t const mode,
  idx_t const nmodes,
  matrix_t * * aTa,
  matrix_t * rhs,
  val_t const reg)
{
  timer_start(&timers[TIMER_INV]);

  /* nfactors */
  splatt_blas_int N = aTa[0]->J;

  /* build the (regularized) Gram matrix into aTa[MAX_NMODES] */
  p_form_gram(aTa[MAX_NMODES], aTa, mode, nmodes, reg);

  splatt_blas_int info;
  char uplo = 'L';
  splatt_blas_int lda = N;
  splatt_blas_int ldb = N;
  splatt_blas_int order = N;
  splatt_blas_int nrhs = (splatt_blas_int) rhs->I;
  val_t * const neqs = aTa[MAX_NMODES]->vals;

  /* Cholesky factorization */
  bool is_spd = true;
  SPLATT_BLAS(potrf)(&uplo, &order, neqs, &lda, &info);
  if(info) {
    fprintf(stderr, "SPLATT: Gram matrix is not SPD. Trying `GELSS`.\n");
    is_spd = false;
  }

  /* Continue with Cholesky */
  if(is_spd) {
    /* Solve against rhs */
    SPLATT_BLAS(potrs)(&uplo, &order, &nrhs, neqs, &lda, rhs->vals, &ldb, &info);
    if(info) {
      fprintf(stderr, "SPLATT: DPOTRS returned %d\n", (int) info);
    }
  } else {
    /* restore gram matrix -- POTRF clobbered it */
    p_form_gram(aTa[MAX_NMODES], aTa, mode, nmodes, reg);

    splatt_blas_int effective_rank;
    val_t * conditions = splatt_malloc(N * sizeof(*conditions));

    /* query worksize: lwork = -1 makes GELSS return the optimal size */
    splatt_blas_int lwork = -1;
    val_t rcond = -1.0f; /* negative -> use machine precision threshold */
    val_t work_query;
    SPLATT_BLAS(gelss)(&N, &N, &nrhs,
        neqs, &lda,
        rhs->vals, &ldb,
        conditions, &rcond, &effective_rank,
        &work_query, &lwork, &info);
    lwork = (splatt_blas_int) work_query;

    /* setup workspace */
    val_t * work = splatt_malloc(lwork * sizeof(*work));

    /* Use an SVD solver */
    SPLATT_BLAS(gelss)(&N, &N, &nrhs,
        neqs, &lda,
        rhs->vals, &ldb,
        conditions, &rcond, &effective_rank,
        work, &lwork, &info);
    if(info) {
      fprintf(stderr, "SPLATT: DGELSS returned %d\n", (int) info);
    }
    printf("SPLATT: DGELSS effective rank: %d\n", (int) effective_rank);

    splatt_free(conditions);
    splatt_free(work);
  }
  timer_stop(&timers[TIMER_INV]);
}
/*
 * Forms the Gram matrix for `mode` as the Hadamard product of all other
 * modes' aTa matrices, stores it in aTa[MAX_NMODES], and inverts it in
 * place via mat_syminv().
 */
void calc_gram_inv(
  idx_t const mode,
  idx_t const nmodes,
  matrix_t ** aTa)
{
  timer_start(&timers[TIMER_INV]);

  idx_t const rank = aTa[0]->J;
  idx_t const nentries = rank * rank;
  val_t * const restrict accum = aTa[MAX_NMODES]->vals;

  /* accum <- elementwise product of aTa[m] for every m != mode */
  for(idx_t x=0; x < nentries; ++x) {
    accum[x] = 1.;
  }
  for(idx_t m=1; m < nmodes; ++m) {
    val_t const * const mv = aTa[(mode + m) % nmodes]->vals;
    for(idx_t x=0; x < nentries; ++x) {
      accum[x] *= mv[x];
    }
  }

  /* invert the Gram matrix in place */
  mat_syminv(aTa[MAX_NMODES]);
  timer_stop(&timers[TIMER_INV]);
}
/*
 * Allocates an uninitialized nrows x ncols row-major matrix.
 * Caller owns the result and releases it with mat_free().
 */
matrix_t * mat_alloc(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * m = (matrix_t *) splatt_malloc(sizeof(matrix_t));
  m->I = nrows;
  m->J = ncols;
  m->rowmajor = 1;
  /* value buffer is left uninitialized */
  m->vals = (val_t *) splatt_malloc(nrows * ncols * sizeof(val_t));
  return m;
}
/*
 * Allocates an nrows x ncols row-major matrix and fills it with random
 * values via fill_rand(). Caller frees with mat_free().
 */
matrix_t * mat_rand(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * m = mat_alloc(nrows, ncols);
  fill_rand(m->vals, nrows * ncols);
  return m;
}
/*
 * Releases a matrix allocated by mat_alloc()/mat_rand().
 *
 * FIX: now tolerates a NULL argument (previously dereferenced it), so
 * callers can free unconditionally.
 */
void mat_free(
  matrix_t * mat)
{
  if(mat == NULL) {
    return;
  }
  free(mat->vals);
  free(mat);
}
/*
 * Returns a new row-major copy of a column-major matrix.
 * Caller owns the result.
 */
matrix_t * mat_mkrow(
  matrix_t const * const mat)
{
  /* input must be column-major */
  assert(mat->rowmajor == 0);

  idx_t const nrows = mat->I;
  idx_t const ncols = mat->J;
  matrix_t * row = mat_alloc(nrows, ncols); /* mat_alloc marks it row-major */

  val_t * const restrict dst = row->vals;
  val_t const * const restrict src = mat->vals;

  /* transpose the storage layout: dst is (i,j) row-major,
   * src is (i,j) column-major */
  for(idx_t j=0; j < ncols; ++j) {
    for(idx_t i=0; i < nrows; ++i) {
      dst[j + (i*ncols)] = src[i + (j*nrows)];
    }
  }
  return row;
}
/*
 * Returns a new column-major copy of a row-major matrix.
 * Caller owns the result.
 */
matrix_t * mat_mkcol(
  matrix_t const * const mat)
{
  /* input must be row-major */
  assert(mat->rowmajor == 1);

  idx_t const nrows = mat->I;
  idx_t const ncols = mat->J;
  matrix_t * col = mat_alloc(nrows, ncols);
  col->rowmajor = 0;

  val_t * const restrict dst = col->vals;
  val_t const * const restrict src = mat->vals;

  /* transpose the storage layout: dst is (i,j) column-major,
   * src is (i,j) row-major */
  for(idx_t r=0; r < nrows; ++r) {
    for(idx_t c=0; c < ncols; ++c) {
      dst[r + (c*nrows)] = src[c + (r*ncols)];
    }
  }
  return col;
}
/*
 * Allocates an uninitialized CSR sparse matrix with room for `nnz`
 * nonzeros. Caller frees with spmat_free().
 */
spmatrix_t * spmat_alloc(
  idx_t const nrows,
  idx_t const ncols,
  idx_t const nnz)
{
  spmatrix_t * m = (spmatrix_t*) splatt_malloc(sizeof(spmatrix_t));
  m->I = nrows;
  m->J = ncols;
  m->nnz = nnz;
  /* CSR storage: nrows+1 row pointers, then nnz column indices/values */
  m->rowptr = (idx_t*) splatt_malloc((nrows+1) * sizeof(idx_t));
  m->colind = (idx_t*) splatt_malloc(nnz * sizeof(idx_t));
  m->vals   = (val_t*) splatt_malloc(nnz * sizeof(val_t));
  return m;
}
/*
 * Releases a CSR matrix allocated by spmat_alloc().
 *
 * FIX: now tolerates a NULL argument (previously dereferenced it), so
 * callers can free unconditionally.
 */
void spmat_free(
  spmatrix_t * mat)
{
  if(mat == NULL) {
    return;
  }
  free(mat->rowptr);
  free(mat->colind);
  free(mat->vals);
  free(mat);
}
|
pzgssvx.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Solves a system of linear equations A*X=B
*
* <pre>
* -- Distributed SuperLU routine (version 6.0) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* November 1, 2007
* October 22, 2012
* October 1, 2014
* April 5, 2015
* December 31, 2015 version 4.3
* December 31, 2016 version 5.1.3
* April 10, 2018 version 5.3
* September 18, 2018 version 6.0
* </pre>
*/
#include <math.h>
#include "superlu_zdefs.h"
/*! \brief
*
* <pre>
* Purpose
* =======
*
* PZGSSVX solves a system of linear equations A*X=B,
* by using Gaussian elimination with "static pivoting" to
* compute the LU factorization of A.
*
* Static pivoting is a technique that combines the numerical stability
* of partial pivoting with the scalability of Cholesky (no pivoting),
* to run accurately and efficiently on large numbers of processors.
* See our paper at http://www.nersc.gov/~xiaoye/SuperLU/ for a detailed
* description of the parallel algorithms.
*
* The input matrices A and B are distributed by block rows.
* Here is a graphical illustration (0-based indexing):
*
* A B
* 0 --------------- ------
* | | | |
* | | P0 | |
* | | | |
* --------------- ------
* - fst_row->| | | |
* | | | | |
* m_loc | | P1 | |
* | | | | |
* - | | | |
* --------------- ------
* | . | |. |
* | . | |. |
* | . | |. |
* --------------- ------
*
* where, fst_row is the row number of the first row,
* m_loc is the number of rows local to this processor
* These are defined in the 'SuperMatrix' structure, see supermatrix.h.
*
*
* Here are the options for using this code:
*
* 1. Independent of all the other options specified below, the
* user must supply
*
* - B, the matrix of right-hand sides, distributed by block rows,
* and its dimensions ldb (local) and nrhs (global)
* - grid, a structure describing the 2D processor mesh
* - options->IterRefine, which determines whether or not to
* improve the accuracy of the computed solution using
* iterative refinement
*
* On output, B is overwritten with the solution X.
*
* 2. Depending on options->Fact, the user has four options
* for solving A*X=B. The standard option is for factoring
* A "from scratch". (The other options, described below,
* are used when A is sufficiently similar to a previously
* solved problem to save time by reusing part or all of
* the previous factorization.)
*
* - options->Fact = DOFACT: A is factored "from scratch"
*
* In this case the user must also supply
*
* o A, the input matrix
*
* as well as the following options to determine what matrix to
* factorize.
*
* o options->Equil, to specify how to scale the rows and columns
* of A to "equilibrate" it (to try to reduce its
* condition number and so improve the
* accuracy of the computed solution)
*
* o options->RowPerm, to specify how to permute the rows of A
* (typically to control numerical stability)
*
* o options->ColPerm, to specify how to permute the columns of A
* (typically to control fill-in and enhance
* parallelism during factorization)
*
* o options->ReplaceTinyPivot, to specify how to deal with tiny
* pivots encountered during factorization
* (to control numerical stability)
*
* The outputs returned include
*
* o ScalePermstruct, modified to describe how the input matrix A
* was equilibrated and permuted:
* . ScalePermstruct->DiagScale, indicates whether the rows and/or
* columns of A were scaled
* . ScalePermstruct->R, array of row scale factors
* . ScalePermstruct->C, array of column scale factors
* . ScalePermstruct->perm_r, row permutation vector
* . ScalePermstruct->perm_c, column permutation vector
*
* (part of ScalePermstruct may also need to be supplied on input,
* depending on options->RowPerm and options->ColPerm as described
* later).
*
* o A, the input matrix A overwritten by the scaled and permuted
* matrix diag(R)*A*diag(C)*Pc^T, where
 *                        Pc is the column permutation matrix determined by
 *                           ScalePermstruct->perm_c
* diag(R) and diag(C) are diagonal scaling matrices determined
* by ScalePermstruct->DiagScale, ScalePermstruct->R and
* ScalePermstruct->C
*
* o LUstruct, which contains the L and U factorization of A1 where
*
* A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
*
* (Note that A1 = Pc*Pr*Aout, where Aout is the matrix stored
* in A on output.)
*
* 3. The second value of options->Fact assumes that a matrix with the same
* sparsity pattern as A has already been factored:
*
* - options->Fact = SamePattern: A is factored, assuming that it has
* the same nonzero pattern as a previously factored matrix. In
* this case the algorithm saves time by reusing the previously
* computed column permutation vector stored in
* ScalePermstruct->perm_c and the "elimination tree" of A
* stored in LUstruct->etree
*
* In this case the user must still specify the following options
* as before:
*
* o options->Equil
* o options->RowPerm
* o options->ReplaceTinyPivot
*
* but not options->ColPerm, whose value is ignored. This is because the
* previous column permutation from ScalePermstruct->perm_c is used as
* input. The user must also supply
*
* o A, the input matrix
* o ScalePermstruct->perm_c, the column permutation
* o LUstruct->etree, the elimination tree
*
* The outputs returned include
*
* o A, the input matrix A overwritten by the scaled and permuted
* matrix as described above
* o ScalePermstruct, modified to describe how the input matrix A was
* equilibrated and row permuted
* o LUstruct, modified to contain the new L and U factors
*
* 4. The third value of options->Fact assumes that a matrix B with the same
* sparsity pattern as A has already been factored, and where the
* row permutation of B can be reused for A. This is useful when A and B
* have similar numerical values, so that the same row permutation
* will make both factorizations numerically stable. This lets us reuse
* all of the previously computed structure of L and U.
*
* - options->Fact = SamePattern_SameRowPerm: A is factored,
* assuming not only the same nonzero pattern as the previously
* factored matrix B, but reusing B's row permutation.
*
* In this case the user must still specify the following options
* as before:
*
* o options->Equil
* o options->ReplaceTinyPivot
*
* but not options->RowPerm or options->ColPerm, whose values are
* ignored. This is because the permutations from ScalePermstruct->perm_r
* and ScalePermstruct->perm_c are used as input.
*
* The user must also supply
*
* o A, the input matrix
* o ScalePermstruct->DiagScale, how the previous matrix was row
* and/or column scaled
* o ScalePermstruct->R, the row scalings of the previous matrix,
* if any
* o ScalePermstruct->C, the columns scalings of the previous matrix,
* if any
* o ScalePermstruct->perm_r, the row permutation of the previous
* matrix
* o ScalePermstruct->perm_c, the column permutation of the previous
* matrix
* o all of LUstruct, the previously computed information about
* L and U (the actual numerical values of L and U
* stored in LUstruct->Llu are ignored)
*
* The outputs returned include
*
* o A, the input matrix A overwritten by the scaled and permuted
* matrix as described above
* o ScalePermstruct, modified to describe how the input matrix A was
* equilibrated (thus ScalePermstruct->DiagScale,
* R and C may be modified)
* o LUstruct, modified to contain the new L and U factors
*
* 5. The fourth and last value of options->Fact assumes that A is
* identical to a matrix that has already been factored on a previous
* call, and reuses its entire LU factorization
*
* - options->Fact = Factored: A is identical to a previously
* factorized matrix, so the entire previous factorization
* can be reused.
*
* In this case all the other options mentioned above are ignored
* (options->Equil, options->RowPerm, options->ColPerm,
* options->ReplaceTinyPivot)
*
* The user must also supply
*
* o A, the unfactored matrix, only in the case that iterative
* refinement is to be done (specifically A must be the output
* A from the previous call, so that it has been scaled and permuted)
* o all of ScalePermstruct
* o all of LUstruct, including the actual numerical values of
* L and U
*
* all of which are unmodified on output.
*
* Arguments
* =========
*
* options (input) superlu_dist_options_t* (global)
* The structure defines the input parameters to control
* how the LU decomposition will be performed.
* The following fields should be defined for this structure:
*
* o Fact (fact_t)
* Specifies whether or not the factored form of the matrix
* A is supplied on entry, and if not, how the matrix A should
* be factorized based on the previous history.
*
* = DOFACT: The matrix A will be factorized from scratch.
* Inputs: A
* options->Equil, RowPerm, ColPerm, ReplaceTinyPivot
* Outputs: modified A
* (possibly row and/or column scaled and/or
* permuted)
* all of ScalePermstruct
* all of LUstruct
*
* = SamePattern: the matrix A will be factorized assuming
* that a factorization of a matrix with the same sparsity
* pattern was performed prior to this one. Therefore, this
* factorization will reuse column permutation vector
* ScalePermstruct->perm_c and the elimination tree
* LUstruct->etree
* Inputs: A
* options->Equil, RowPerm, ReplaceTinyPivot
* ScalePermstruct->perm_c
* LUstruct->etree
* Outputs: modified A
* (possibly row and/or column scaled and/or
* permuted)
* rest of ScalePermstruct (DiagScale, R, C, perm_r)
* rest of LUstruct (GLU_persist, Llu)
*
* = SamePattern_SameRowPerm: the matrix A will be factorized
* assuming that a factorization of a matrix with the same
* sparsity pattern and similar numerical values was performed
* prior to this one. Therefore, this factorization will reuse
* both row and column scaling factors R and C, and the
* both row and column permutation vectors perm_r and perm_c,
* distributed data structure set up from the previous symbolic
* factorization.
* Inputs: A
* options->Equil, ReplaceTinyPivot
* all of ScalePermstruct
* all of LUstruct
* Outputs: modified A
* (possibly row and/or column scaled and/or
* permuted)
* modified LUstruct->Llu
* = FACTORED: the matrix A is already factored.
* Inputs: all of ScalePermstruct
* all of LUstruct
*
* o Equil (yes_no_t)
* Specifies whether to equilibrate the system.
* = NO: no equilibration.
* = YES: scaling factors are computed to equilibrate the system:
* diag(R)*A*diag(C)*inv(diag(C))*X = diag(R)*B.
* Whether or not the system will be equilibrated depends
* on the scaling of the matrix A, but if equilibration is
* used, A is overwritten by diag(R)*A*diag(C) and B by
* diag(R)*B.
*
* o RowPerm (rowperm_t)
* Specifies how to permute rows of the matrix A.
* = NATURAL: use the natural ordering.
* = LargeDiag_MC64: use the Duff/Koster algorithm to permute rows
* of the original matrix to make the diagonal large
* relative to the off-diagonal.
* = LargeDiag_APWM: use the parallel approximate-weight perfect
* matching to permute rows of the original matrix
* to make the diagonal large relative to the
* off-diagonal.
* = MY_PERMR: use the ordering given in ScalePermstruct->perm_r
* input by the user.
*
* o ColPerm (colperm_t)
* Specifies what type of column permutation to use to reduce fill.
* = NATURAL: natural ordering.
* = MMD_AT_PLUS_A: minimum degree ordering on structure of A'+A.
* = MMD_ATA: minimum degree ordering on structure of A'*A.
* = MY_PERMC: the ordering given in ScalePermstruct->perm_c.
*
* o ReplaceTinyPivot (yes_no_t)
* = NO: do not modify pivots
* = YES: replace tiny pivots by sqrt(epsilon)*norm(A) during
* LU factorization.
*
* o IterRefine (IterRefine_t)
* Specifies how to perform iterative refinement.
* = NO: no iterative refinement.
* = SLU_DOUBLE: accumulate residual in double precision.
* = SLU_EXTRA: accumulate residual in extra precision.
*
* NOTE: all options must be identical on all processes when
* calling this routine.
*
* A (input/output) SuperMatrix* (local)
* On entry, matrix A in A*X=B, of dimension (A->nrow, A->ncol).
* The number of linear equations is A->nrow. The type of A must be:
 *         Stype = SLU_NR_loc; Dtype = SLU_Z; Mtype = SLU_GE.
* That is, A is stored in distributed compressed row format.
* See supermatrix.h for the definition of 'SuperMatrix'.
* This routine only handles square A, however, the LU factorization
* routine PDGSTRF can factorize rectangular matrices.
 *         On exit, A may be overwritten by diag(R)*A*diag(C)*Pc^T,
* depending on ScalePermstruct->DiagScale and options->ColPerm:
* if ScalePermstruct->DiagScale != NOEQUIL, A is overwritten by
* diag(R)*A*diag(C).
* if options->ColPerm != NATURAL, A is further overwritten by
* diag(R)*A*diag(C)*Pc^T.
* If all the above condition are true, the LU decomposition is
* performed on the matrix Pc*Pr*diag(R)*A*diag(C)*Pc^T.
*
* ScalePermstruct (input/output) ScalePermstruct_t* (global)
* The data structure to store the scaling and permutation vectors
* describing the transformations performed to the matrix A.
* It contains the following fields:
*
* o DiagScale (DiagScale_t)
* Specifies the form of equilibration that was done.
* = NOEQUIL: no equilibration.
* = ROW: row equilibration, i.e., A was premultiplied by
* diag(R).
* = COL: Column equilibration, i.e., A was postmultiplied
* by diag(C).
* = BOTH: both row and column equilibration, i.e., A was
* replaced by diag(R)*A*diag(C).
* If options->Fact = FACTORED or SamePattern_SameRowPerm,
* DiagScale is an input argument; otherwise it is an output
* argument.
*
* o perm_r (int*)
* Row permutation vector, which defines the permutation matrix Pr;
* perm_r[i] = j means row i of A is in position j in Pr*A.
* If options->RowPerm = MY_PERMR, or
* options->Fact = SamePattern_SameRowPerm, perm_r is an
* input argument; otherwise it is an output argument.
*
* o perm_c (int*)
* Column permutation vector, which defines the
* permutation matrix Pc; perm_c[i] = j means column i of A is
* in position j in A*Pc.
* If options->ColPerm = MY_PERMC or options->Fact = SamePattern
* or options->Fact = SamePattern_SameRowPerm, perm_c is an
* input argument; otherwise, it is an output argument.
* On exit, perm_c may be overwritten by the product of the input
* perm_c and a permutation that postorders the elimination tree
* of Pc*A'*A*Pc'; perm_c is not changed if the elimination tree
* is already in postorder.
*
* o R (double*) dimension (A->nrow)
* The row scale factors for A.
* If DiagScale = ROW or BOTH, A is multiplied on the left by
* diag(R).
* If DiagScale = NOEQUIL or COL, R is not defined.
* If options->Fact = FACTORED or SamePattern_SameRowPerm, R is
* an input argument; otherwise, R is an output argument.
*
* o C (double*) dimension (A->ncol)
* The column scale factors for A.
* If DiagScale = COL or BOTH, A is multiplied on the right by
* diag(C).
* If DiagScale = NOEQUIL or ROW, C is not defined.
* If options->Fact = FACTORED or SamePattern_SameRowPerm, C is
* an input argument; otherwise, C is an output argument.
*
* B (input/output) doublecomplex* (local)
* On entry, the right-hand side matrix of dimension (m_loc, nrhs),
* where, m_loc is the number of rows stored locally on my
* process and is defined in the data structure of matrix A.
* On exit, the solution matrix if info = 0;
*
* ldb (input) int (local)
* The leading dimension of matrix B.
*
* nrhs (input) int (global)
* The number of right-hand sides.
* If nrhs = 0, only LU decomposition is performed, the forward
* and back substitutions are skipped.
*
* grid (input) gridinfo_t* (global)
* The 2D process mesh. It contains the MPI communicator, the number
* of process rows (NPROW), the number of process columns (NPCOL),
* and my process rank. It is an input argument to all the
* parallel routines.
* Grid can be initialized by subroutine SUPERLU_GRIDINIT.
* See superlu_zdefs.h for the definition of 'gridinfo_t'.
*
* LUstruct (input/output) LUstruct_t*
* The data structures to store the distributed L and U factors.
* It contains the following fields:
*
* o etree (int*) dimension (A->ncol) (global)
* Elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'.
* It is computed in sp_colorder() during the first factorization,
* and is reused in the subsequent factorizations of the matrices
* with the same nonzero pattern.
* On exit of sp_colorder(), the columns of A are permuted so that
* the etree is in a certain postorder. This postorder is reflected
* in ScalePermstruct->perm_c.
* NOTE:
* Etree is a vector of parent pointers for a forest whose vertices
* are the integers 0 to A->ncol-1; etree[root]==A->ncol.
*
* o Glu_persist (Glu_persist_t*) (global)
* Global data structure (xsup, supno) replicated on all processes,
* describing the supernode partition in the factored matrices
* L and U:
* xsup[s] is the leading column of the s-th supernode,
* supno[i] is the supernode number to which column i belongs.
*
* o Llu (LocalLU_t*) (local)
* The distributed data structures to store L and U factors.
* See superlu_zdefs.h for the definition of 'LocalLU_t'.
*
* SOLVEstruct (input/output) SOLVEstruct_t*
* The data structure to hold the communication pattern used
* in the phases of triangular solution and iterative refinement.
* This pattern should be initialized only once for repeated solutions.
* If options->SolveInitialized = YES, it is an input argument.
* If options->SolveInitialized = NO and nrhs != 0, it is an output
* argument. See superlu_zdefs.h for the definition of 'SOLVEstruct_t'.
*
* berr (output) double*, dimension (nrhs) (global)
* The componentwise relative backward error of each solution
* vector X(j) (i.e., the smallest relative change in
* any element of A or B that makes X(j) an exact solution).
*
* stat (output) SuperLUStat_t*
* Record the statistics on runtime and floating-point operation count.
* See util.h for the definition of 'SuperLUStat_t'.
*
* info (output) int*
* = 0: successful exit
* > 0: if info = i, and i is
* <= A->ncol: U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* so the solution could not be computed.
* > A->ncol: number of bytes allocated when memory allocation
* failure occurred, plus A->ncol.
*
* See superlu_zdefs.h for the definitions of various data types.
* </pre>
*/
void
pzgssvx(superlu_dist_options_t *options, SuperMatrix *A,
ScalePermstruct_t *ScalePermstruct,
doublecomplex B[], int ldb, int nrhs, gridinfo_t *grid,
LUstruct_t *LUstruct, SOLVEstruct_t *SOLVEstruct, double *berr,
SuperLUStat_t *stat, int *info)
{
NRformat_loc *Astore;
SuperMatrix GA; /* Global A in NC format */
NCformat *GAstore;
doublecomplex *a_GA;
SuperMatrix GAC; /* Global A in NCP format (add n end pointers) */
NCPformat *GACstore;
Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
Glu_freeable_t *Glu_freeable;
/* The nonzero structures of L and U factors, which are
 replicated on all processors.
(lsub, xlsub) contains the compressed subscript of
supernodes in L.
(usub, xusub) contains the compressed subscript of
nonzero segments in U.
If options->Fact != SamePattern_SameRowPerm, they are
computed by SYMBFACT routine, and then used by PDDISTRIBUTE
routine. They will be freed after PDDISTRIBUTE routine.
If options->Fact == SamePattern_SameRowPerm, these
structures are not used. */
fact_t Fact;
doublecomplex *a;
int_t *colptr, *rowind;
int_t *perm_r; /* row permutations from partial pivoting */
int_t *perm_c; /* column permutation vector */
int_t *etree; /* elimination tree */
int_t *rowptr, *colind; /* Local A in NR*/
int_t colequ, Equil, factored, job, notran, rowequ, need_value;
int_t i, iinfo, j, irow, m, n, nnz, permc_spec;
int_t nnz_loc, m_loc, fst_row, icol;
int iam,iam_g;
int ldx; /* LDA for matrix X (local). */
char equed[1], norm[1];
double *C, *R, *C1, *R1, amax, anorm, colcnd, rowcnd;
doublecomplex *X, *b_col, *b_work, *x_col;
double t;
float GA_mem_use = 0.0; /* memory usage by global A */
float dist_mem_use = 0.0; /* memory usage during distribution */
superlu_dist_mem_usage_t num_mem_usage, symb_mem_usage;
int64_t nnzLU;
int_t nnz_tot;
doublecomplex *nzval_a;
doublecomplex asum,asum_tot,lsum,lsum_tot;
int_t nsupers,nsupers_j;
int_t lk,k,knsupc,nsupr;
int_t *lsub,*xsup;
doublecomplex *lusup;
#if ( PRNTlevel>= 2 )
double dmin, dsum, dprod;
#endif
LUstruct->dt = 'z';
/* Structures needed for parallel symbolic factorization */
int_t *sizes, *fstVtxSep, parSymbFact;
int noDomains, nprocs_num;
MPI_Comm symb_comm; /* communicator for symbolic factorization */
int col, key; /* parameters for creating a new communicator */
Pslu_freeable_t Pslu_freeable;
float flinfo;
/* Initialization. */
m = A->nrow;
n = A->ncol;
Astore = (NRformat_loc *) A->Store;
nnz_loc = Astore->nnz_loc;
m_loc = Astore->m_loc;
fst_row = Astore->fst_row;
a = (doublecomplex *) Astore->nzval;
rowptr = Astore->rowptr;
colind = Astore->colind;
sizes = NULL;
fstVtxSep = NULL;
symb_comm = MPI_COMM_NULL;
num_mem_usage.for_lu = num_mem_usage.total = 0.0;
symb_mem_usage.for_lu = symb_mem_usage.total = 0.0;
/* Test the input parameters. */
*info = 0;
Fact = options->Fact;
if ( Fact < 0 || Fact > FACTORED )
*info = -1;
else if ( options->RowPerm < 0 || options->RowPerm > MY_PERMR )
*info = -1;
else if ( options->ColPerm < 0 || options->ColPerm > MY_PERMC )
*info = -1;
else if ( options->IterRefine < 0 || options->IterRefine > SLU_EXTRA )
*info = -1;
else if ( options->IterRefine == SLU_EXTRA ) {
*info = -1;
printf("ERROR: Extra precise iterative refinement yet to support.\n");
} else if ( A->nrow != A->ncol || A->nrow < 0 || A->Stype != SLU_NR_loc
|| A->Dtype != SLU_Z || A->Mtype != SLU_GE )
*info = -2;
else if ( ldb < m_loc )
*info = -5;
else if ( nrhs < 0 )
*info = -6;
if ( sp_ienv_dist(2) > sp_ienv_dist(3) ) {
*info = 1;
printf("ERROR: Relaxation (NREL) cannot be larger than max. supernode size (NSUP).\n"
"\t-> Check parameter setting in sp_ienv_dist.c to correct error.\n");
}
if ( *info ) {
i = -(*info);
pxerr_dist("pzgssvx", grid, -*info);
return;
}
factored = (Fact == FACTORED);
Equil = (!factored && options->Equil == YES);
notran = (options->Trans == NOTRANS);
parSymbFact = options->ParSymbFact;
iam = grid->iam;
job = 5;
if ( factored || (Fact == SamePattern_SameRowPerm && Equil) ) {
rowequ = (ScalePermstruct->DiagScale == ROW) ||
(ScalePermstruct->DiagScale == BOTH);
colequ = (ScalePermstruct->DiagScale == COL) ||
(ScalePermstruct->DiagScale == BOTH);
} else rowequ = colequ = FALSE;
/* The following arrays are replicated on all processes. */
perm_r = ScalePermstruct->perm_r;
perm_c = ScalePermstruct->perm_c;
etree = LUstruct->etree;
R = ScalePermstruct->R;
C = ScalePermstruct->C;
/********/
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Enter pzgssvx()");
#endif
/* Not factored & ask for equilibration */
if ( Equil && Fact != SamePattern_SameRowPerm ) {
/* Allocate storage if not done so before. */
switch ( ScalePermstruct->DiagScale ) {
case NOEQUIL:
if ( !(R = (double *) doubleMalloc_dist(m)) )
ABORT("Malloc fails for R[].");
if ( !(C = (double *) doubleMalloc_dist(n)) )
ABORT("Malloc fails for C[].");
ScalePermstruct->R = R;
ScalePermstruct->C = C;
break;
case ROW:
if ( !(C = (double *) doubleMalloc_dist(n)) )
ABORT("Malloc fails for C[].");
ScalePermstruct->C = C;
break;
case COL:
if ( !(R = (double *) doubleMalloc_dist(m)) )
ABORT("Malloc fails for R[].");
ScalePermstruct->R = R;
break;
}
}
/* ------------------------------------------------------------
* Diagonal scaling to equilibrate the matrix. (simple scheme)
* for row i = 1:n, A(i,:) <- A(i,:) / max(abs(A(i,:));
* for column j = 1:n, A(:,j) <- A(:, j) / max(abs(A(:,j))
* ------------------------------------------------------------*/
if ( Equil ) {
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Enter equil");
#endif
t = SuperLU_timer_();
if ( Fact == SamePattern_SameRowPerm ) {
/* Reuse R and C. */
switch ( ScalePermstruct->DiagScale ) {
case NOEQUIL:
break;
case ROW:
irow = fst_row;
for (j = 0; j < m_loc; ++j) {
for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
zd_mult(&a[i], &a[i], R[irow]); /* Scale rows */
}
++irow;
}
break;
case COL:
for (j = 0; j < m_loc; ++j)
for (i = rowptr[j]; i < rowptr[j+1]; ++i){
icol = colind[i];
zd_mult(&a[i], &a[i], C[icol]); /* Scale columns */
}
break;
case BOTH:
irow = fst_row;
for (j = 0; j < m_loc; ++j) {
for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
icol = colind[i];
zd_mult(&a[i], &a[i], R[irow]); /* Scale rows */
zd_mult(&a[i], &a[i], C[icol]); /* Scale columns */
}
++irow;
}
break;
}
} else { /* Compute R & C from scratch */
/* Compute the row and column scalings. */
pzgsequ(A, R, C, &rowcnd, &colcnd, &amax, &iinfo, grid);
if ( iinfo > 0 ) {
if ( iinfo <= m ) {
#if ( PRNTlevel>=1 )
fprintf(stderr, "The " IFMT "-th row of A is exactly zero\n", iinfo);
#endif
} else {
#if ( PRNTlevel>=1 )
fprintf(stderr, "The " IFMT "-th column of A is exactly zero\n", iinfo-n);
#endif
}
} else if ( iinfo < 0 ) return;
/* Now iinfo == 0 */
/* Equilibrate matrix A if it is badly-scaled.
A <-- diag(R)*A*diag(C) */
pzlaqgs(A, R, C, rowcnd, colcnd, amax, equed);
if ( strncmp(equed, "R", 1)==0 ) {
ScalePermstruct->DiagScale = ROW;
rowequ = ROW;
} else if ( strncmp(equed, "C", 1)==0 ) {
ScalePermstruct->DiagScale = COL;
colequ = COL;
} else if ( strncmp(equed, "B", 1)==0 ) {
ScalePermstruct->DiagScale = BOTH;
rowequ = ROW;
colequ = COL;
} else ScalePermstruct->DiagScale = NOEQUIL;
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf(".. equilibrated? *equed = %c\n", *equed);
fflush(stdout);
}
#endif
} /* end if Fact ... */
stat->utime[EQUIL] = SuperLU_timer_() - t;
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Exit equil");
#endif
} /* end if Equil ... LAPACK style, not involving MC64 */
if ( !factored ) { /* Skip this if already factored. */
/*
* For serial symbolic factorization, gather A from the distributed
* compressed row format to global A in compressed column format.
* Numerical values are gathered only when a row permutation
* for large diagonal is sought after.
*/
if ( Fact != SamePattern_SameRowPerm &&
(parSymbFact == NO || options->RowPerm != NO) ) {
/* Performs serial symbolic factorzation and/or MC64 */
need_value = (options->RowPerm == LargeDiag_MC64);
pzCompRow_loc_to_CompCol_global(need_value, A, grid, &GA);
GAstore = (NCformat *) GA.Store;
colptr = GAstore->colptr;
rowind = GAstore->rowind;
nnz = GAstore->nnz;
GA_mem_use = (nnz + n + 1) * sizeof(int_t);
if ( need_value ) {
a_GA = (doublecomplex *) GAstore->nzval;
GA_mem_use += nnz * sizeof(doublecomplex);
} else assert(GAstore->nzval == NULL);
}
/* ------------------------------------------------------------
Find the row permutation Pr for A, and apply Pr*[GA].
GA is overwritten by Pr*[GA].
------------------------------------------------------------*/
if ( options->RowPerm != NO ) {
t = SuperLU_timer_();
if ( Fact != SamePattern_SameRowPerm ) {
if ( options->RowPerm == MY_PERMR ) { /* Use user's perm_r. */
/* Permute the global matrix GA for symbfact() */
for (i = 0; i < colptr[n]; ++i) {
irow = rowind[i];
rowind[i] = perm_r[irow];
}
} else if ( options->RowPerm == LargeDiag_MC64 ) {
/* Get a new perm_r[] from MC64 */
if ( job == 5 ) {
/* Allocate storage for scaling factors. */
if ( !(R1 = doubleMalloc_dist(m)) )
ABORT("SUPERLU_MALLOC fails for R1[]");
if ( !(C1 = doubleMalloc_dist(n)) )
ABORT("SUPERLU_MALLOC fails for C1[]");
}
if ( !iam ) { /* Process 0 finds a row permutation */
iinfo = zldperm_dist(job, m, nnz, colptr, rowind, a_GA,
perm_r, R1, C1);
MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
if ( iinfo == 0 ) {
MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
if ( job == 5 && Equil ) {
MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
}
}
} else {
MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
if ( iinfo == 0 ) {
MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
if ( job == 5 && Equil ) {
MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
}
}
}
if ( iinfo && job == 5) { /* Error return */
SUPERLU_FREE(R1);
SUPERLU_FREE(C1);
}
#if ( PRNTlevel>=2 )
dmin = dmach_dist("Overflow");
dsum = 0.0;
dprod = 1.0;
#endif
if ( iinfo == 0 ) {
if ( job == 5 ) {
if ( Equil ) {
for (i = 0; i < n; ++i) {
R1[i] = exp(R1[i]);
C1[i] = exp(C1[i]);
}
/* Scale the distributed matrix further.
A <-- diag(R1)*A*diag(C1) */
irow = fst_row;
for (j = 0; j < m_loc; ++j) {
for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
icol = colind[i];
zd_mult(&a[i], &a[i], R1[irow]);
zd_mult(&a[i], &a[i], C1[icol]);
#if ( PRNTlevel>=2 )
if ( perm_r[irow] == icol ) { /* New diagonal */
if ( job == 2 || job == 3 )
dmin = SUPERLU_MIN(dmin, slud_z_abs1(&a[i]));
else if ( job == 4 )
dsum += slud_z_abs1(&a[i]);
else if ( job == 5 )
dprod *= slud_z_abs1(&a[i]);
}
#endif
}
++irow;
}
/* Multiply together the scaling factors --
R/C from simple scheme, R1/C1 from MC64. */
if ( rowequ ) for (i = 0; i < m; ++i) R[i] *= R1[i];
else for (i = 0; i < m; ++i) R[i] = R1[i];
if ( colequ ) for (i = 0; i < n; ++i) C[i] *= C1[i];
else for (i = 0; i < n; ++i) C[i] = C1[i];
ScalePermstruct->DiagScale = BOTH;
rowequ = colequ = 1;
} /* end Equil */
/* Now permute global GA to prepare for symbfact() */
for (j = 0; j < n; ++j) {
for (i = colptr[j]; i < colptr[j+1]; ++i) {
irow = rowind[i];
rowind[i] = perm_r[irow];
}
}
SUPERLU_FREE (R1);
SUPERLU_FREE (C1);
} else { /* job = 2,3,4 */
for (j = 0; j < n; ++j) {
for (i = colptr[j]; i < colptr[j+1]; ++i) {
irow = rowind[i];
rowind[i] = perm_r[irow];
} /* end for i ... */
} /* end for j ... */
} /* end else job ... */
} else { /* if iinfo != 0 */
for (i = 0; i < m; ++i) perm_r[i] = i;
}
#if ( PRNTlevel>=2 )
if ( job == 2 || job == 3 ) {
if ( !iam ) printf("\tsmallest diagonal %e\n", dmin);
} else if ( job == 4 ) {
if ( !iam ) printf("\tsum of diagonal %e\n", dsum);
} else if ( job == 5 ) {
if ( !iam ) printf("\t product of diagonal %e\n", dprod);
}
#endif
} else { /* use largeDiag_AWPM */
#ifdef HAVE_COMBBLAS
c2cpp_GetAWPM(A, grid, ScalePermstruct);
#else
if ( iam == 0 ) {
printf("CombBLAS is not available\n"); fflush(stdout);
}
#endif
} /* end if options->RowPerm ... */
t = SuperLU_timer_() - t;
stat->utime[ROWPERM] = t;
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf(".. LDPERM job " IFMT "\t time: %.2f\n", job, t);
fflush(stdout);
}
#endif
} /* end if Fact ... */
} else { /* options->RowPerm == NOROWPERM / NATURAL */
for (i = 0; i < m; ++i) perm_r[i] = i;
}
#if ( DEBUGlevel>=2 )
if ( !iam ) PrintInt10("perm_r", m, perm_r);
#endif
} /* end if (!factored) */
if ( !factored || options->IterRefine ) {
/* Compute norm(A), which will be used to adjust small diagonal. */
if ( notran ) *(unsigned char *)norm = '1';
else *(unsigned char *)norm = 'I';
anorm = pzlangs(norm, A, grid);
#if ( PRNTlevel>=1 )
if ( !iam ) { printf(".. anorm %e\n", anorm); fflush(stdout); }
#endif
}
/* ------------------------------------------------------------
Perform the LU factorization: symbolic factorization,
redistribution, and numerical factorization.
------------------------------------------------------------*/
if ( !factored ) {
t = SuperLU_timer_();
/*
* Get column permutation vector perm_c[], according to permc_spec:
* permc_spec = NATURAL: natural ordering
* permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A
* permc_spec = MMD_ATA: minimum degree on structure of A'*A
* permc_spec = METIS_AT_PLUS_A: METIS on structure of A'+A
* permc_spec = PARMETIS: parallel METIS on structure of A'+A
* permc_spec = MY_PERMC: the ordering already supplied in perm_c[]
*/
permc_spec = options->ColPerm;
if ( parSymbFact == YES || permc_spec == PARMETIS ) {
nprocs_num = grid->nprow * grid->npcol;
noDomains = (int) ( pow(2, ((int) LOG2( nprocs_num ))));
/* create a new communicator for the first noDomains
processes in grid->comm */
key = iam;
if (iam < noDomains) col = 0;
else col = MPI_UNDEFINED;
MPI_Comm_split (grid->comm, col, key, &symb_comm );
if ( permc_spec == NATURAL || permc_spec == MY_PERMC ) {
if ( permc_spec == NATURAL ) {
for (j = 0; j < n; ++j) perm_c[j] = j;
}
if ( !(sizes = intMalloc_dist(2 * noDomains)) )
ABORT("SUPERLU_MALLOC fails for sizes.");
if ( !(fstVtxSep = intMalloc_dist(2 * noDomains)) )
ABORT("SUPERLU_MALLOC fails for fstVtxSep.");
for (i = 0; i < 2*noDomains - 2; ++i) {
sizes[i] = 0;
fstVtxSep[i] = 0;
}
sizes[2*noDomains - 2] = m;
fstVtxSep[2*noDomains - 2] = 0;
} else if ( permc_spec != PARMETIS ) { /* same as before */
printf("{" IFMT "," IFMT "}: pzgssvx: invalid ColPerm option when ParSymbfact is used\n",
MYROW(grid->iam, grid), MYCOL(grid->iam, grid));
}
}
if ( permc_spec != MY_PERMC && Fact == DOFACT ) {
/* Reuse perm_c if Fact == SamePattern, or SamePattern_SameRowPerm */
if ( permc_spec == PARMETIS ) {
// #pragma omp parallel
// {
// #pragma omp master
// {
/* Get column permutation vector in perm_c. *
* This routine takes as input the distributed input matrix A *
* and does not modify it. It also allocates memory for *
* sizes[] and fstVtxSep[] arrays, that contain information *
* on the separator tree computed by ParMETIS. */
flinfo = get_perm_c_parmetis(A, perm_r, perm_c, nprocs_num,
noDomains, &sizes, &fstVtxSep,
grid, &symb_comm);
// }
// }
if (flinfo > 0) {
#if ( PRNTlevel>=1 )
fprintf(stderr, "Insufficient memory for get_perm_c parmetis\n");
#endif
*info = flinfo;
return;
}
} else {
get_perm_c_dist(iam, permc_spec, &GA, perm_c);
}
}
stat->utime[COLPERM] = SuperLU_timer_() - t;
/* Symbolic factorization. */
if ( Fact != SamePattern_SameRowPerm ) {
if ( parSymbFact == NO ) { /* Perform serial symbolic factorization */
/* GA = Pr*A, perm_r[] is already applied. */
int_t *GACcolbeg, *GACcolend, *GACrowind;
/* Compute the elimination tree of Pc*(A^T+A)*Pc^T or Pc*A^T*A*Pc^T
(a.k.a. column etree), depending on the choice of ColPerm.
Adjust perm_c[] to be consistent with a postorder of etree.
Permute columns of A to form A*Pc'.
After this routine, GAC = GA*Pc^T. */
sp_colorder(options, &GA, perm_c, etree, &GAC);
/* Form Pc*A*Pc^T to preserve the diagonal of the matrix GAC. */
GACstore = (NCPformat *) GAC.Store;
GACcolbeg = GACstore->colbeg;
GACcolend = GACstore->colend;
GACrowind = GACstore->rowind;
for (j = 0; j < n; ++j) {
for (i = GACcolbeg[j]; i < GACcolend[j]; ++i) {
irow = GACrowind[i];
GACrowind[i] = perm_c[irow];
}
}
/* Perform a symbolic factorization on Pc*Pr*A*Pc^T and set up
the nonzero data structures for L & U. */
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf(".. symbfact(): relax " IFMT ", maxsuper " IFMT ", fill " IFMT "\n",
sp_ienv_dist(2), sp_ienv_dist(3), sp_ienv_dist(6));
fflush(stdout);
}
#endif
t = SuperLU_timer_();
if ( !(Glu_freeable = (Glu_freeable_t *)
SUPERLU_MALLOC(sizeof(Glu_freeable_t))) )
ABORT("Malloc fails for Glu_freeable.");
/* Every process does this. */
iinfo = symbfact(options, iam, &GAC, perm_c, etree,
Glu_persist, Glu_freeable);
nnzLU = Glu_freeable->nnzLU;
stat->utime[SYMBFAC] = SuperLU_timer_() - t;
if ( iinfo <= 0 ) { /* Successful return */
QuerySpace_dist(n, -iinfo, Glu_freeable, &symb_mem_usage);
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf("\tNo of supers " IFMT "\n", Glu_persist->supno[n-1]+1);
printf("\tSize of G(L) " IFMT "\n", Glu_freeable->xlsub[n]);
printf("\tSize of G(U) " IFMT "\n", Glu_freeable->xusub[n]);
printf("\tint %d, short %d, float %d, double %d\n",
(int) sizeof(int_t), (int) sizeof(short),
(int) sizeof(float), (int) sizeof(double));
printf("\tSYMBfact (MB):\tL\\U %.2f\ttotal %.2f\texpansions " IFMT "\n",
symb_mem_usage.for_lu*1e-6,
symb_mem_usage.total*1e-6,
symb_mem_usage.expansions);
fflush(stdout);
}
#endif
} else { /* symbfact out of memory */
#if ( PRNTlevel>=1 )
if ( !iam )
fprintf(stderr,"symbfact() error returns " IFMT "\n",iinfo);
#endif
*info = iinfo;
return;
}
} /* end serial symbolic factorization */
else { /* parallel symbolic factorization */
t = SuperLU_timer_();
flinfo = symbfact_dist(nprocs_num, noDomains, A, perm_c, perm_r,
sizes, fstVtxSep, &Pslu_freeable,
&(grid->comm), &symb_comm,
&symb_mem_usage);
nnzLU = Pslu_freeable.nnzLU;
stat->utime[SYMBFAC] = SuperLU_timer_() - t;
if (flinfo > 0) {
#if ( PRNTlevel>=1 )
fprintf(stderr, "Insufficient memory for parallel symbolic factorization.");
#endif
*info = flinfo;
return;
}
}
/* Destroy global GA */
if ( parSymbFact == NO || options->RowPerm != NO )
Destroy_CompCol_Matrix_dist(&GA);
if ( parSymbFact == NO )
Destroy_CompCol_Permuted_dist(&GAC);
} /* end if Fact != SamePattern_SameRowPerm ... */
if (sizes) SUPERLU_FREE (sizes);
if (fstVtxSep) SUPERLU_FREE (fstVtxSep);
if (symb_comm != MPI_COMM_NULL) MPI_Comm_free (&symb_comm);
/* Distribute entries of A into L & U data structures. */
//if (parSymbFact == NO || ???? Fact == SamePattern_SameRowPerm) {
if ( parSymbFact == NO ) {
/* CASE OF SERIAL SYMBOLIC */
/* Apply column permutation to the original distributed A */
for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]];
/* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc^T into L and U storage.
NOTE: the row permutation Pc*Pr is applied internally in the
distribution routine. */
t = SuperLU_timer_();
dist_mem_use = pzdistribute(Fact, n, A, ScalePermstruct,
Glu_freeable, LUstruct, grid);
stat->utime[DIST] = SuperLU_timer_() - t;
/* Deallocate storage used in symbolic factorization. */
if ( Fact != SamePattern_SameRowPerm ) {
iinfo = symbfact_SubFree(Glu_freeable);
SUPERLU_FREE(Glu_freeable);
}
} else { /* CASE OF PARALLEL SYMBOLIC */
/* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc' into L and U storage.
NOTE: the row permutation Pc*Pr is applied internally in the
distribution routine. */
/* Apply column permutation to the original distributed A */
for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]];
t = SuperLU_timer_();
dist_mem_use = zdist_psymbtonum(Fact, n, A, ScalePermstruct,
&Pslu_freeable, LUstruct, grid);
if (dist_mem_use > 0)
ABORT ("Not enough memory available for dist_psymbtonum\n");
stat->utime[DIST] = SuperLU_timer_() - t;
}
/*if (!iam) printf ("\tDISTRIBUTE time %8.2f\n", stat->utime[DIST]);*/
/* Perform numerical factorization in parallel. */
t = SuperLU_timer_();
// #pragma omp parallel
// {
// #pragma omp master
// {
pzgstrf(options, m, n, anorm, LUstruct, grid, stat, info);
stat->utime[FACT] = SuperLU_timer_() - t;
// }
// }
#if ( PRNTlevel>=2 )
/* ------------------------------------------------------------
SUM OVER ALL ENTRIES OF A AND PRINT NNZ AND SIZE OF A.
------------------------------------------------------------*/
Astore = (NRformat_loc *) A->Store;
xsup = Glu_persist->xsup;
nzval_a = Astore->nzval;
asum.r=0.0;
asum.i=0.0;
for (i = 0; i < Astore->m_loc; ++i) {
for (j = Astore->rowptr[i]; j < Astore->rowptr[i+1]; ++j) {
z_add(&asum,&asum,&nzval_a[j]);
}
}
nsupers = Glu_persist->supno[n-1] + 1;
nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */
lsum.r=0.0;
lsum.i=0.0;
for (lk=0;lk<nsupers_j;++lk){
lsub = LUstruct->Llu->Lrowind_bc_ptr[lk];
lusup = LUstruct->Llu->Lnzval_bc_ptr[lk];
if(lsub){
k = MYCOL(grid->iam, grid)+lk*grid->npcol; /* not sure */
knsupc = SuperSize( k );
nsupr = lsub[1];
for (j=0; j<knsupc; ++j)
for (i = 0; i < nsupr; ++i)
z_add(&lsum,&lsum,&lusup[j*nsupr+i]);
}
}
MPI_Allreduce( &(asum.r), &(asum_tot.r),1, MPI_DOUBLE, MPI_SUM, grid->comm );
MPI_Allreduce( &(asum.i), &(asum_tot.i),1, MPI_DOUBLE, MPI_SUM, grid->comm );
MPI_Allreduce( &(lsum.r), &(lsum_tot.r),1, MPI_DOUBLE, MPI_SUM, grid->comm );
MPI_Allreduce( &(lsum.i), &(lsum_tot.i),1, MPI_DOUBLE, MPI_SUM, grid->comm );
MPI_Allreduce( &Astore->rowptr[Astore->m_loc], &nnz_tot,1, mpi_int_t, MPI_SUM, grid->comm );
// MPI_Bcast( &nnzLU, 1, mpi_int_t, 0, grid->comm );
MPI_Comm_rank( MPI_COMM_WORLD, &iam_g );
if (!iam_g) {
print_options_dist(options);
fflush(stdout);
}
printf(".. Ainfo mygid %5d mysid %5d nnz_loc " IFMT " sum_loc %e lsum_loc %e nnz "IFMT " nnzLU %ld sum %e lsum %e N "IFMT "\n", iam_g,iam,Astore->rowptr[Astore->m_loc],asum.r+asum.i, lsum.r+lsum.i, nnz_tot,nnzLU,asum_tot.r+asum_tot.i,lsum_tot.r+lsum_tot.i,A->ncol);
fflush(stdout);
#endif
#if 0
// #ifdef GPU_PROF
// if(!iam )
// {
// char* ttemp;
// ttemp = getenv("IO_FILE");
// if(ttemp!=NULL)
// {
// printf("File being opend is %s\n",ttemp );
// FILE* fp;
// fp = fopen(ttemp,"w");
// if(!fp)
// {
// fprintf(stderr," Couldn't open output file %s\n",ttemp);
// }
// int nsup=Glu_persist->supno[n-1]+1;
// int ii;
// for (ii = 0; ii < nsup; ++ii)
// {
// fprintf(fp,"%d,%d,%d,%d,%d,%d\n",gs1.mnk_min_stats[ii],gs1.mnk_min_stats[ii+nsup],
// gs1.mnk_min_stats[ii+2*nsup],
// gs1.mnk_max_stats[ii],gs1.mnk_max_stats[ii+nsup],gs1.mnk_max_stats[ii+2*nsup]);
// }
// // lastly put the timeing stats that we need
// fprintf(fp,"Min %lf Max %lf totaltime %lf \n",gs1.osDgemmMin,gs1.osDgemmMax,stat->utime[FACT]);
// fclose(fp);
// }
// }
// #endif
#endif
if ( options->PrintStat ) {
int_t TinyPivots;
float for_lu, total, max, avg, temp;
zQuerySpace_dist(n, LUstruct, grid, stat, &num_mem_usage);
if (parSymbFact == TRUE) {
/* The memory used in the redistribution routine
includes the memory used for storing the symbolic
structure and the memory allocated for numerical
factorization */
temp = SUPERLU_MAX(symb_mem_usage.total, -dist_mem_use);
if ( options->RowPerm != NO )
temp = SUPERLU_MAX(temp, GA_mem_use);
} else {
temp = SUPERLU_MAX (
symb_mem_usage.total + GA_mem_use, /* symbfact step */
symb_mem_usage.for_lu + dist_mem_use +
num_mem_usage.for_lu /* distribution step */
);
}
temp = SUPERLU_MAX(temp, num_mem_usage.total);
MPI_Reduce( &temp, &max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
MPI_Reduce( &temp, &avg,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
MPI_Allreduce( &stat->TinyPivots, &TinyPivots, 1, mpi_int_t,
MPI_SUM, grid->comm );
stat->TinyPivots = TinyPivots;
MPI_Reduce( &num_mem_usage.for_lu, &for_lu,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
MPI_Reduce( &num_mem_usage.total, &total,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
if (!iam) {
printf("\n** Memory Usage **********************************\n");
printf("** NUMfact space (MB): (sum-of-all-processes)\n"
" L\\U : %8.2f | Total : %8.2f\n",
for_lu * 1e-6, total * 1e-6);
printf("** Total highmark (MB):\n"
" Sum-of-all : %8.2f | Avg : %8.2f | Max : %8.2f\n",
avg * 1e-6,
avg / grid->nprow / grid->npcol * 1e-6,
max * 1e-6);
printf("**************************************************\n");
fflush(stdout);
}
} /* end printing stats */
} /* end if (!factored) */
if ( options->Fact == DOFACT || options->Fact == SamePattern ) {
/* Need to reset the solve's communication pattern,
because perm_r[] and/or perm_c[] is changed. */
if ( options->SolveInitialized == YES ) { /* Initialized before */
zSolveFinalize(options, SOLVEstruct); /* Clean up structure */
options->SolveInitialized = NO; /* Reset the solve state */
}
}
#if 0
/* Need to revisit: Why the following is not good enough for X-to-B
distribution -- inv_perm_c changed */
pxgstrs_finalize(SOLVEstruct->gstrs_comm);
pxgstrs_init(A->ncol, m_loc, nrhs, fst_row, perm_r, perm_c, grid,
LUstruct->Glu_persist, SOLVEstruct);
#endif
/* ------------------------------------------------------------
Compute the solution matrix X.
------------------------------------------------------------*/
if ( nrhs && *info == 0 ) {
if ( !(b_work = doublecomplexMalloc_dist(n)) )
ABORT("Malloc fails for b_work[]");
/* ------------------------------------------------------------
Scale the right-hand side if equilibration was performed.
------------------------------------------------------------*/
if ( notran ) {
if ( rowequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
zd_mult(&b_col[i], &b_col[i], R[irow]);
++irow;
}
b_col += ldb;
}
}
} else if ( colequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
zd_mult(&b_col[i], &b_col[i], C[irow]);
++irow;
}
b_col += ldb;
}
}
/* Save a copy of the right-hand side. */
ldx = ldb;
if ( !(X = doublecomplexMalloc_dist(((size_t)ldx) * nrhs)) )
ABORT("Malloc fails for X[]");
x_col = X; b_col = B;
for (j = 0; j < nrhs; ++j) {
#if 0 /* Sherry */
for (i = 0; i < m_loc; ++i) x_col[i] = b_col[i];
#endif
memcpy(x_col, b_col, m_loc * sizeof(doublecomplex));
x_col += ldx; b_col += ldb;
}
/* ------------------------------------------------------------
Solve the linear system.
------------------------------------------------------------*/
if ( options->SolveInitialized == NO ) { /* First time */
zSolveInit(options, A, perm_r, perm_c, nrhs, LUstruct, grid,
SOLVEstruct);
/* Inside this routine, SolveInitialized is set to YES.
For repeated call to pzgssvx(), no need to re-initialilze
the Solve data & communication structures, unless a new
factorization with Fact == DOFACT or SamePattern is asked for. */
}
if ( options->DiagInv==YES &&
(options->SolveInitialized == NO || Fact == SamePattern ||
Fact == SamePattern_SameRowPerm) ) {
pzCompute_Diag_Inv(n, LUstruct, grid, stat, info);
}
// #pragma omp parallel
// {
// #pragma omp master
// {
pzgstrs(n, LUstruct, ScalePermstruct, grid, X, m_loc,
fst_row, ldb, nrhs, SOLVEstruct, stat, info);
// }
// }
/* ------------------------------------------------------------
Use iterative refinement to improve the computed solution and
compute error bounds and backward error estimates for it.
------------------------------------------------------------*/
if ( options->IterRefine ) {
/* Improve the solution by iterative refinement. */
int_t *it;
int_t *colind_gsmv = SOLVEstruct->A_colind_gsmv;
/* This was allocated and set to NULL in zSolveInit() */
SOLVEstruct_t *SOLVEstruct1; /* Used by refinement. */
t = SuperLU_timer_();
if ( options->RefineInitialized == NO || Fact == DOFACT ) {
/* All these cases need to re-initialize gsmv structure */
if ( options->RefineInitialized )
pzgsmv_finalize(SOLVEstruct->gsmv_comm);
pzgsmv_init(A, SOLVEstruct->row_to_proc, grid,
SOLVEstruct->gsmv_comm);
/* Save a copy of the transformed local col indices
in colind_gsmv[]. */
if ( colind_gsmv ) SUPERLU_FREE(colind_gsmv);
if ( !(it = intMalloc_dist(nnz_loc)) )
ABORT("Malloc fails for colind_gsmv[]");
colind_gsmv = SOLVEstruct->A_colind_gsmv = it;
for (i = 0; i < nnz_loc; ++i) colind_gsmv[i] = colind[i];
options->RefineInitialized = YES;
} else if ( Fact == SamePattern ||
Fact == SamePattern_SameRowPerm ) {
doublecomplex atemp;
int_t k, jcol, p;
/* Swap to beginning the part of A corresponding to the
local part of X, as was done in pzgsmv_init() */
for (i = 0; i < m_loc; ++i) { /* Loop through each row */
k = rowptr[i];
for (j = rowptr[i]; j < rowptr[i+1]; ++j) {
jcol = colind[j];
p = SOLVEstruct->row_to_proc[jcol];
if ( p == iam ) { /* Local */
atemp = a[k]; a[k] = a[j]; a[j] = atemp;
++k;
}
}
}
/* Re-use the local col indices of A obtained from the
previous call to pzgsmv_init() */
for (i = 0; i < nnz_loc; ++i) colind[i] = colind_gsmv[i];
}
if ( nrhs == 1 ) { /* Use the existing solve structure */
SOLVEstruct1 = SOLVEstruct;
} else { /* For nrhs > 1, since refinement is performed for RHS
one at a time, the communication structure for pdgstrs
is different than the solve with nrhs RHS.
So we use SOLVEstruct1 for the refinement step.
*/
if ( !(SOLVEstruct1 = (SOLVEstruct_t *)
SUPERLU_MALLOC(sizeof(SOLVEstruct_t))) )
ABORT("Malloc fails for SOLVEstruct1");
/* Copy the same stuff */
SOLVEstruct1->row_to_proc = SOLVEstruct->row_to_proc;
SOLVEstruct1->inv_perm_c = SOLVEstruct->inv_perm_c;
SOLVEstruct1->num_diag_procs = SOLVEstruct->num_diag_procs;
SOLVEstruct1->diag_procs = SOLVEstruct->diag_procs;
SOLVEstruct1->diag_len = SOLVEstruct->diag_len;
SOLVEstruct1->gsmv_comm = SOLVEstruct->gsmv_comm;
SOLVEstruct1->A_colind_gsmv = SOLVEstruct->A_colind_gsmv;
/* Initialize the *gstrs_comm for 1 RHS. */
if ( !(SOLVEstruct1->gstrs_comm = (pxgstrs_comm_t *)
SUPERLU_MALLOC(sizeof(pxgstrs_comm_t))) )
ABORT("Malloc fails for gstrs_comm[]");
pxgstrs_init(n, m_loc, 1, fst_row, perm_r, perm_c, grid,
Glu_persist, SOLVEstruct1);
}
pzgsrfs(n, A, anorm, LUstruct, ScalePermstruct, grid,
B, ldb, X, ldx, nrhs, SOLVEstruct1, berr, stat, info);
/* Deallocate the storage associated with SOLVEstruct1 */
if ( nrhs > 1 ) {
pxgstrs_finalize(SOLVEstruct1->gstrs_comm);
SUPERLU_FREE(SOLVEstruct1);
}
stat->utime[REFINE] = SuperLU_timer_() - t;
} /* end if IterRefine */
/* Permute the solution matrix B <= Pc'*X. */
pzPermute_Dense_Matrix(fst_row, m_loc, SOLVEstruct->row_to_proc,
SOLVEstruct->inv_perm_c,
X, ldx, B, ldb, nrhs, grid);
#if ( DEBUGlevel>=2 )
printf("\n (%d) .. After pzPermute_Dense_Matrix(): b =\n", iam);
for (i = 0; i < m_loc; ++i)
printf("\t(%d)\t%4d\t%.10f\n", iam, i+fst_row, B[i]);
#endif
/* Transform the solution matrix X to a solution of the original
system before equilibration. */
if ( notran ) {
if ( colequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
zd_mult(&b_col[i], &b_col[i], C[irow]);
++irow;
}
b_col += ldb;
}
}
} else if ( rowequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
zd_mult(&b_col[i], &b_col[i], R[irow]);
++irow;
}
b_col += ldb;
}
}
SUPERLU_FREE(b_work);
SUPERLU_FREE(X);
} /* end if nrhs != 0 && *info == 0 */
#if ( PRNTlevel>=1 )
if ( !iam ) printf(".. DiagScale = %d\n", ScalePermstruct->DiagScale);
#endif
/* Deallocate R and/or C if it was not used. */
if ( Equil && Fact != SamePattern_SameRowPerm ) {
switch ( ScalePermstruct->DiagScale ) {
case NOEQUIL:
SUPERLU_FREE(R);
SUPERLU_FREE(C);
break;
case ROW:
SUPERLU_FREE(C);
break;
case COL:
SUPERLU_FREE(R);
break;
}
}
#if 0
if ( !factored && Fact != SamePattern_SameRowPerm && !parSymbFact)
Destroy_CompCol_Permuted_dist(&GAC);
#endif
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Exit pzgssvx()");
#endif
}
|
kt_sbucket.c |
#include "kt_sbucket.h"
/**
 * Move an edge to the support bucket matching its new support value.
 *
 * Detaches `edge_id` from the doubly-linked list of its current support
 * level and pushes it onto the head of the list for `support`, with both
 * levels clamped below at (ktruss - 3), the current peeling threshold.
 *
 * @param sbucket Bucket structure owning `edge_id`.
 * @param edge_id Edge to relocate.
 * @param support New support value (negative => edge already deleted; no-op).
 * @param ktruss  Current k of the k-truss computation; peeling starts at
 *                support level (ktruss - 3).
 *
 * NOTE: relies on slist[-1] being a valid sentinel slot (the backing array
 * is allocated with a +1 offset in sbucket_alloc()), so links equal to -1
 * can be written through without branching.
 */
void sbucket_update_edge(
    support_bucket_t * const sbucket,
    int64_t const edge_id,
    int32_t const support,
    int32_t const ktruss)
{
  slist_s * const slist = sbucket->slist;

  /* no-op if edge has already been deleted or updated */
  if(support < 0 || support == slist[edge_id].support) {
    return;
  }

  /* peel starting at (ktruss - 3) */
  int32_t const min_sup = ktruss - 3;

  ssize_t * shead = sbucket->list_head;

  /*
   * NOTE: The logic of selecting new/old support instead of what is actually
   * given is that we are ultimately interested in the smallest bucket
   * having ALL edges which need to be peeled. So bottom-occupied list actually
   * contains all to-be-peeled edges.
   */

  /* remove edge_id from current support-bucket; clamp at min_sup because all
   * edges at or below the peeling threshold live in the min_sup list */
  int32_t const old_sup = gk_max(slist[edge_id].support, min_sup);

  /* if edge_id is the head of the list */
  if(shead[old_sup] == edge_id) {
    shead[old_sup] = slist[edge_id].next_eid;
    /* if next_eid == -1 this writes to the sentinel slot -- intentional */
    slist[slist[edge_id].next_eid].prev_eid = -1;
  } else {
    /* unlink from middle/tail; -1 neighbors resolve to the sentinel slot */
    slist[slist[edge_id].prev_eid].next_eid = slist[edge_id].next_eid;
    slist[slist[edge_id].next_eid].prev_eid = slist[edge_id].prev_eid;
  }

  /* now add edge_id to the head of the new (clamped) list; the edge keeps
   * its true, unclamped support value in slist[edge_id].support */
  int32_t const new_sup = gk_max(support, min_sup);
  slist[edge_id].support = support;
  slist[edge_id].prev_eid = -1;
  slist[edge_id].next_eid = shead[new_sup];
  slist[shead[new_sup]].prev_eid = edge_id;
  shead[new_sup] = edge_id;
}
/**
 * Count the edges currently stored at a given support level.
 *
 * @param sbucket Bucket to inspect.
 * @param support Support level whose list should be measured.
 * @return Number of edges in the list, or 0 if `support` is out of range.
 */
int64_t sbucket_count_support_size(
    support_bucket_t const * const sbucket,
    int32_t const support)
{
  /* an out-of-range support level has no bucket list at all */
  if(support >= sbucket->nsupports) {
    return 0;
  }

  /* walk the next_eid chain until the -1 terminator */
  int64_t count = 0;
  for(ssize_t cur = sbucket->list_head[support];
      cur != -1;
      cur = sbucket->slist[cur].next_eid) {
    ++count;
  }
  return count;
}
/**
 * Collect all edges at a given support level, across every thread-bucket,
 * into a single contiguous frontier buffer, and empty those lists.
 *
 * Works in three phases inside one parallel region:
 *   1. each bucket's list length is counted in parallel;
 *   2. a single thread turns the counts into exclusive prefix sums
 *      (MAKECSR), giving each bucket a disjoint write offset;
 *   3. each bucket copies its edges into frontier[] in parallel and resets
 *      its list head.
 * The implicit barriers at the end of each worksharing construct order the
 * phases.
 *
 * @param sbuckets Array of per-thread buckets (nbuckets entries).
 * @param support  Support level to drain.
 * @param frontier Output buffer; must be large enough for all listed edges.
 * @return Total number of edges written to frontier[].
 *
 * NOTE(review): bucket_sizes is sized by compile-time maxima
 * (KT_MAX_THREADS * KT_BUCKETS_PER_THREAD); assumes omp_get_max_threads()
 * never exceeds KT_MAX_THREADS -- confirm against configuration.
 */
int64_t sbucket_get_frontier(
    support_bucket_t * const sbuckets,
    int32_t const support,
    int64_t * frontier)
{
  int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;
  /* +1 slot so MAKECSR can store the grand total at bucket_sizes[nbuckets] */
  int64_t bucket_sizes[1 + KT_MAX_THREADS * KT_BUCKETS_PER_THREAD];

  #pragma omp parallel
  {
    /* first get size of each bucket */
    #pragma omp for schedule(dynamic, 1)
    for(int b=0; b < nbuckets; ++b) {
      bucket_sizes[b] = sbucket_count_support_size(&(sbuckets[b]), support);
    }

    /* prefix sum to allow parallel writes */
    #pragma omp single
    {
      int b;
      MAKECSR(b, nbuckets, bucket_sizes);
    }

    /* now copy data into frontier buffer */
    #pragma omp for schedule(dynamic, 1)
    for(int b=0; b < nbuckets; ++b) {
      /* traverse list and fill buffer starting at this bucket's offset */
      int64_t * buffer = &(frontier[bucket_sizes[b]]);
      int64_t edge_ptr = 0;
      ssize_t e_id = sbuckets[b].list_head[support];
      while(e_id != -1) {
        buffer[edge_ptr++] = e_id;
        e_id = sbuckets[b].slist[e_id].next_eid;
      }

      /* We are deleting all edges in bucket, so update head of list. */
      sbuckets[b].list_head[support] = -1;
      /* reset the shared sentinel slot; every bucket writes the same values,
       * so the concurrent stores are redundant but consistent */
      sbuckets[b].slist[-1].prev_eid = -1;
      sbuckets[b].slist[-1].next_eid = -1;
    } /* foreach bucket */
  } /* end omp parallel */

  /* MAKECSR left the total edge count in the final slot */
  return bucket_sizes[nbuckets];
}
/**
 * Copy every edge ID stored at a given support level into edge_ids[].
 *
 * @param sbucket  Bucket to read from.
 * @param support  Support level to copy; out-of-range is a no-op.
 * @param edge_ids Output buffer; caller must size it via
 *                 sbucket_count_support_size().
 */
void sbucket_fill_edges(
    support_bucket_t const * const sbucket,
    int32_t const support,
    int64_t * const restrict edge_ids)
{
  /* nothing stored beyond the highest tracked support level */
  if(support >= sbucket->nsupports) {
    return;
  }

  /* follow the next_eid chain, appending each edge to the buffer */
  int64_t out = 0;
  for(ssize_t cur = sbucket->list_head[support];
      cur != -1;
      cur = sbucket->slist[cur].next_eid) {
    edge_ids[out++] = cur;
  }
}
/**
 * Allocate and populate the per-thread support buckets.
 *
 * Creates (omp_get_max_threads() * KT_BUCKETS_PER_THREAD) buckets, one
 * shared global slist backing array, and a per-bucket list_head array, then
 * assigns every edge to a bucket's support list.
 *
 * @param edges         Edge array (unused here beyond sizing context).
 * @param supports      Per-edge support values, length global_nedges.
 * @param global_nedges Total number of edges.
 * @param thd_ws        Thread workspaces; thd_ws[0] is used by
 *                      map_edge_to_bucket() -- presumably to decide edge
 *                      ownership; exact mapping semantics not visible here.
 * @return Newly allocated array of buckets; release with sbucket_free().
 */
support_bucket_t * sbucket_alloc(
    edge_t const * const edges,
    int32_t const * const supports,
    int64_t const global_nedges,
    thread_ws * * thd_ws)
{
  /* allocate buckets */
  int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;
  support_bucket_t * sbuckets = gk_malloc(nbuckets * sizeof(*sbuckets),
      "sbuckets");

  /* support values range over [0, max]; one list per possible value */
  int32_t const nsupports = max_elem(supports, global_nedges) + 1;

  /*
   * It is easier to have a single global slist that the various buckets
   * point into. This allows us to avoid any local <-> global mappings of
   * edge IDs.
   */
  slist_s * big_slist = gk_malloc((global_nedges+1)*sizeof(*big_slist),
        "big_slist");
  par_memset(big_slist, 0, (global_nedges+1) * sizeof(*big_slist));
  ++big_slist; /* +1 to allow slist[-1] to be valid */

  /* allocate each thread-bucket; static,1 spreads the gk_malloc calls
   * across threads */
  #pragma omp parallel for schedule(static, 1)
  for(int bucket=0; bucket < nbuckets; ++bucket) {
    support_bucket_t * sbucket = &(sbuckets[bucket]);

    sbucket->nsupports = nsupports;
    sbucket->nowned_edges = 0;
    /* every bucket aliases the same (offset) global slist */
    sbucket->slist = big_slist;
    sbucket->list_head =
        gk_malloc(sbucket->nsupports * sizeof(*sbucket->list_head), "list_head");

    /* -1 marks an empty list at each support level */
    ssize_t * const shead = sbucket->list_head;
    for(int32_t s=0; s < sbucket->nsupports; ++s) {
      shead[s] = -1;
    }
  }

  /* go over all edges and assign to support-buckets (serial: buckets share
   * no per-edge state, but head updates are not thread-safe) */
  for(int64_t e=0; e < global_nedges; ++e) {
    int64_t const bucket = map_edge_to_bucket(e, thd_ws[0]);
    support_bucket_t * sbucket = &(sbuckets[bucket]);
    slist_s * slist = sbucket->slist;
    ssize_t * const shead = sbucket->list_head;

    int32_t const sup = supports[e];

    /* fill data: push edge e onto the head of its support list */
    slist[e].prev_eid = -1;
    slist[e].next_eid = shead[sup];
    slist[e].support = sup;

    /* update doubly-linked list */
    if(shead[sup] != -1) {
      slist[shead[sup]].prev_eid = e;
    }
    shead[sup] = e;

    ++sbucket->nowned_edges;
  } /* foreach edge */

  return sbuckets;
}
/**
 * Release everything allocated by sbucket_alloc().
 *
 * Fixes the leak flagged by the old "XXX this is all wrong" comment: the
 * previous version freed only bucket 0's list_head, leaking the list_head
 * arrays of the remaining (nbuckets - 1) buckets.
 *
 * @param sbucket Bucket array returned by sbucket_alloc(); invalid after
 *                this call. The caller's pointer is not cleared.
 *
 * NOTE: recomputes nbuckets exactly as sbucket_alloc() does; assumes
 * omp_get_max_threads() has not changed between alloc and free.
 */
void sbucket_free(
    support_bucket_t * sbucket)
{
  int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;

  /* slist is one global array shared by every bucket; undo the +1 sentinel
   * offset applied in sbucket_alloc() and free it exactly once. */
  --(sbucket->slist);
  gk_free((void **) &(sbucket->slist), LTERM);

  /* each bucket owns its own list_head array */
  for(int b=0; b < nbuckets; ++b) {
    gk_free((void **) &sbucket[b].list_head, LTERM);
  }

  /* finally release the bucket array itself */
  gk_free((void **) &sbucket, LTERM);
}
|
GB_unaryop__minv_fp32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint8
// op(A') function: GB_tran__minv_fp32_uint8
// C type: float
// A type: uint8_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/**
 * Cx = op (cast (Ax)): apply the MINV unary operator elementwise.
 *
 * Auto-generated kernel: for each of the anz entries, casts uint8_t aij to
 * float and stores 1.0F/aij into Cx (via the GB_CAST_OP macro defined in
 * this file's header section).
 *
 * @param Cx       Output array, anz floats.
 * @param Ax       Input array, anz uint8_t values.
 * @param anz      Number of entries to process.
 * @param nthreads OpenMP thread count for the parallel loop.
 * @return GrB_SUCCESS, or GrB_NO_VALUE when the operator/type combination
 *         is compiled out via GB_DISABLE.
 */
GrB_Info GB_unop__minv_fp32_uint8
(
    float *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* entries are independent, so a flat static schedule suffices */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/**
 * C = op (cast (A')): transpose, typecast, and apply the MINV operator.
 *
 * Auto-generated wrapper: the actual work is the shared transpose template
 * (GB_unaryop_transpose.c), specialized here by the GB_* macros defined
 * above and compiled as phase 2 of the two-phase transpose.
 *
 * @param C         Output matrix (already allocated by phase 1).
 * @param A         Input matrix to transpose.
 * @param Rowcounts Per-slice row counts computed in phase 1.
 * @param Iter      Single-matrix iterator over A.
 * @param A_slice   Partition of A's entries across slices.
 * @param naslice   Number of slices (parallelism granularity).
 * @return GrB_SUCCESS, or GrB_NO_VALUE when compiled out via GB_DISABLE.
 */
GrB_Info GB_tran__minv_fp32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias) {
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float *kernel = _kernel;
const float *bias = _bias;
#pragma omp parallel for
for (int p = 0; p < outch; p++) {
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++) {
float *outptr = out;
float *outptr2 = outptr + outw;
const float *img0 = bottom_blob.channel(q);
const float *kernel0 = kernel + p * inch * 9 + q * 9;
const float *r0 = img0;
const float *r1 = img0 + w;
const float *r2 = img0 + w * 2;
const float *r3 = img0 + w * 3;
const float *k0 = kernel0;
const float *k1 = kernel0 + 3;
const float *k2 = kernel0 + 6;
int i = 0;
for (; i + 1 < outh; i += 2) {
int remain = outw;
for (; remain > 0; remain--) {
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++) {
int remain = outw;
for (; remain > 0; remain--) {
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
}
|
GB_deserialize_from_blob.c | //------------------------------------------------------------------------------
// GB_deserialize_from_blob: uncompress a set of blocks from the blob
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Decompress a single array from a set of compressed blocks in the blob. If
// the input data is mangled, this method is still safe, since it performs the
// bare minimum sanity checks to ensure no out-of-bounds indexing of arrays.
// However, the contents of output array are not fully checked. This step is
// done by GB_deserialize, if requested.
#include "GB.h"
#include "GB_serialize.h"
#include "GB_lz4.h"
#define GB_FREE_ALL \
{ \
GB_FREE (&X, X_size) ; \
}
GrB_Info GB_deserialize_from_blob
(
// output:
GB_void **X_handle, // uncompressed output array
size_t *X_size_handle, // size of X as allocated
// input:
int64_t X_len, // size of X in bytes
const GB_void *blob, // serialized blob of size blob_size
size_t blob_size,
int64_t *Sblocks, // array of size nblocks
int32_t nblocks, // # of compressed blocks for this array
int32_t method, // compression method used for each block
// input/output:
size_t *s_handle, // location to write into the blob
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (blob != NULL) ;
ASSERT (s_handle != NULL) ;
ASSERT (X_handle != NULL) ;
ASSERT (X_size_handle != NULL) ;
(*X_handle) = NULL ;
(*X_size_handle) = 0 ;
//--------------------------------------------------------------------------
// parse the method
//--------------------------------------------------------------------------
bool intel ;
int32_t algo, level ;
GB_serialize_method (&intel, &algo, &level, method) ;
// method = (intel ? GxB_COMPRESSION_INTEL : 0) + (algo) + (level) ;
//--------------------------------------------------------------------------
// allocate the output array
//--------------------------------------------------------------------------
size_t X_size = 0 ;
GB_void *X = GB_MALLOC (X_len, GB_void, &X_size) ;
if (X == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// decompress the blocks from the blob
//--------------------------------------------------------------------------
size_t s = (*s_handle) ;
bool ok = true ;
if (algo == GxB_COMPRESSION_NONE)
{
//----------------------------------------------------------------------
// no compression; the array is held in a single block
//----------------------------------------------------------------------
if (nblocks != 1 || Sblocks [0] != X_len || s + X_len > blob_size)
{
// blob is invalid: guard against an unsafe memcpy
ok = false ;
}
else
{
// copy the blob into the array X. This is now safe and secure.
// The contents of X are not yet checked, however.
GB_memcpy (X, blob + s, X_len, nthreads_max) ;
}
}
else if (algo == GxB_COMPRESSION_LZ4 || algo == GxB_COMPRESSION_LZ4HC)
{
//----------------------------------------------------------------------
// LZ4 / LZ4HC compression
//----------------------------------------------------------------------
int nthreads = GB_IMIN (nthreads_max, nblocks) ;
int32_t blockid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic) \
reduction(&&:ok)
for (blockid = 0 ; blockid < nblocks ; blockid++)
{
// get the start and end of the compressed and uncompressed blocks
int64_t kstart, kend ;
GB_PARTITION (kstart, kend, X_len, blockid, nblocks) ;
int64_t s_start = (blockid == 0) ? 0 : Sblocks [blockid-1] ;
int64_t s_end = Sblocks [blockid] ;
size_t s_size = s_end - s_start ;
size_t d_size = kend - kstart ;
// ensure s_start, s_end, kstart, and kend are all valid,
// to avoid accessing arrays out of bounds, if input is corrupted.
if (kstart < 0 || kend < 0 || s_start < 0 || s_end < 0 ||
kstart >= kend || s_start >= s_end || s_size > INT32_MAX ||
s + s_start > blob_size || s + s_end > blob_size ||
kstart > X_len || kend > X_len || d_size > INT32_MAX)
{
// blob is invalid
ok = false ;
}
else
{
// uncompress the compressed block of size s_size
// from blob [s + s_start:s_end-1] into X [kstart:kend-1].
// This is safe and secure so far. The contents of X are
// not yet checked, however. That step is done in
// GB_deserialize, if requested.
const char *src = (const char *) (blob + s + s_start) ;
char *dst = (char *) (X + kstart) ;
int src_size = (int) s_size ;
int dst_size = (int) d_size ;
int u = LZ4_decompress_safe (src, dst, src_size, dst_size) ;
if (u != dst_size)
{
// blob is invalid
ok = false ;
}
}
}
}
else
{
// unknown compression method
ok = false ;
}
if (!ok)
{
// decompression failure; blob is invalid
GB_FREE_ALL ;
return (GrB_INVALID_OBJECT) ;
}
//--------------------------------------------------------------------------
// return result: X, its size, and updated index into the blob
//--------------------------------------------------------------------------
(*X_handle) = X ;
(*X_size_handle) = X_size ;
s += Sblocks [nblocks-1] ;
(*s_handle) = s ;
return (GrB_SUCCESS) ;
}
|
GB_binop__isne_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fp32)
// A*D function (colscale): GB (_AxD__isne_fp32)
// D*A function (rowscale): GB (_DxB__isne_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fp32)
// C=scalar+B GB (_bind1st__isne_fp32)
// C=scalar+B' GB (_bind1st_tran__isne_fp32)
// C=A+scalar GB (_bind2nd__isne_fp32)
// C=A'+scalar GB (_bind2nd_tran__isne_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_FP32 || GxB_NO_ISNE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB054-inneronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
The inner level loop can be parallelized.
*/
int main()
{
int i,j;
int n=100, m=100;
double b[n][m];
for(i=0;i<n; i++)
for(j=0;j<n; j++)
b[i][j]=(double)(i*j);
for (i=1;i<n;i++)
#pragma omp parallel for
for (j=1;j<m;j++)
b[i][j]=b[i-1][j-1];
return 0;
}
|
GB_binop__isle_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8)
// A*D function (colscale): GB (_AxD__isle_uint8)
// D*A function (rowscale): GB (_DxB__isle_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8)
// C=scalar+B GB (_bind1st__isle_uint8)
// C=scalar+B' GB (_bind1st_tran__isle_uint8)
// C=A+scalar GB (_bind2nd__isle_uint8)
// C=A'+scalar GB (_bind2nd_tran__isle_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isle_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, for the ISLE operator on uint8. Auto-generated wrapper.
GrB_Info GB (_AemultB_02__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, for the ISLE operator on uint8. Auto-generated wrapper.
GrB_Info GB (_AemultB_03__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// kernel compiled out; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C
// is bitmap, for the ISLE operator on uint8. Auto-generated wrapper.
GrB_Info GB (_AemultB_bitmap__isle_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x <= Bx [p]) for each entry present in B.
GrB_Info GB (_bind1st__isle_uint8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    const uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // positions absent from B (per the Bb bitmap) are left untouched
        if (GBB (Bb, p))
        {
            uint8_t b_val = GBX (Bx, p, false) ;
            Cx [p] = (x <= b_val) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] <= y) for each entry present in A.
GrB_Info GB (_bind2nd__isle_uint8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    const uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // positions absent from A (per the Ab bitmap) are left untouched
        if (GBB (Ab, p))
        {
            uint8_t a_val = GBX (Ax, p, false) ;
            Cx [p] = (a_val <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply the ISLE operator with the scalar x
// bound as the first argument. The transpose itself is done by the included
// template, which expands GB_CAST_OP above at each scattered entry.
GrB_Info GB (_bind1st_tran__isle_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// auto-generated boilerplate: re-assert GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply the ISLE operator with the scalar y
// bound as the second argument, via the included transpose template.
GrB_Info GB (_bind2nd_tran__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_task_shared.c | <ompts:test>
<ompts:testdescription> Test to see if implied shared works correctly</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp task</ompts:directive>
<ompts:dependences>omp single, omp task firstprivate</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Test body: each generated task increments a counter that must behave as shared */
int <ompts:testcode:functionname>omp_task_shared</ompts:testcode:functionname> (FILE * logFile)
{
<ompts:orphan:vars>
// counter incremented by every task; the test passes only when all tasks
// update the same instance, i.e. when i is (implicitly or explicitly) shared
int i;
</ompts:orphan:vars>
i=0;
int k = 0;
int result = 0;
#pragma omp parallel
{
// one thread generates all NUM_TASKS tasks
#pragma omp single
for (k = 0; k < NUM_TASKS; k++)
{
<ompts:orphan>
// check: shared(i) -> increments accumulate; crosscheck: firstprivate(i)
// -> each task increments a private copy and the final result stays 0
#pragma omp task <ompts:crosscheck> firstprivate(i) </ompts:crosscheck> <ompts:check> shared(i)</ompts:check>
{
#pragma omp atomic
i++;
//this should be shared implicitly
}
</ompts:orphan>
}
}
result = i;
return ((result == NUM_TASKS));
}
</ompts:testcode>
</ompts:test>
|
omp.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <omp.h>
//------------------------------------------------------------------------------------------------------------------------------
// Choose default threading parameters for a level: all OpenMP threads work
// on one box at a time. The thread count is queried from inside a parallel
// region (the master thread records what was actually granted).
void defaultThreadingForLevel(level_type *level){
  int omp_threads = 1; // fallback when OpenMP is inactive or grants one thread
  // NOTE: the previous version also called omp_get_nested() here, but the
  // result was never used (and omp_get_nested is deprecated in OpenMP 5.0),
  // so the dead query has been removed.
  #pragma omp parallel
  {
    #pragma omp master
    {
      omp_threads = omp_get_num_threads();
    }
  }
  // set default parameters for threading...
  level->threads_per_box  = omp_threads;
  level->concurrent_boxes = 1;
}
//------------------------------------------------------------------------------------------------------------------------------
// Placeholder for a smarter policy: currently queries the runtime but does
// not change any level parameters (the queried values are intentionally
// unused until the tuning heuristic below is implemented).
void tuneThreadingForLevel(level_type *level){
int omp_threads = 1;
int omp_nested = 0;
#pragma omp parallel
{
#pragma omp master
{
omp_threads = omp_get_num_threads();
omp_nested = omp_get_nested();
}
}
// inspect omp_nested, omp_num_threads, the number of boxes, and the box size, and choose the optimal values for
// threads_per_box
// concurrent_boxes
}
//------------------------------------------------------------------------------------------------------------------------------
|
GB_unaryop__abs_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_fp64
// op(A') function: GB_tran__abs_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS unary operator entrywise: Cx [k] = fabs (Ax [k]).
GrB_Info GB_unop__abs_fp64_fp64
(
    double *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // GB_CAST_OP expands to: Cx [k] = fabs ((double) Ax [k])
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = fabs (A'): transpose with the ABS operator applied entrywise.
// The transpose machinery lives in the included template; this wrapper
// selects phase 2 of its two-phase algorithm.
GrB_Info GB_tran__abs_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel compiled out (see GB_DISABLE above); generic path is used instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
krb5-18_fmt_plug.c | /*
* KRB5 - Enctype 18 (aes256-cts-hmac-sha1-96) cracker patch for JtR
* Created on August of 2012 by Mougey Camille (CEA/DAM) & Lalet Pierre (CEA/DAM)
*
* This format is one of formats saved in KDC database and used during the authentication part
*
* This software is Copyright (c) 2012, Mougey Camille (CEA/DAM)
* Lalet Pierre (CEA/DAM)
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format :
* - user:$krb18$REALMname$hash
* - user:REALMname$hash
*
* Format rewritten Dec, 2014, without use of -lkrb5, by JimF. Now we use 'native' JtR
* pbkdf2-hmac-sha1() and simple call to 2 AES limb encrypt for entire process. Very
 * simple, and 10x faster, and no obscure -lkrb5 dependency
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5_18;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5_18);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "simd-intrinsics.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "krb5-18"
#define FORMAT_NAME "Kerberos 5 db etype 18"
#define FORMAT_TAG "$krb18$"
#define TAG_LENGTH 7
#if SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " AES"
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR " AES"
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define BINARY_ALIGN 4
#define SALT_SIZE CIPHERTEXT_LENGTH
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
// self-test vectors: the same salt+hash in bare and "$krb18$"-tagged form
static struct fmt_tests kinit_tests[] = {
{"OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
{FORMAT_TAG "OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26",
"password"},
{NULL}
};
// per-candidate plaintext buffers, allocated in init()
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
// current salt string (realm+principal), installed by set_salt()
static char saved_salt[SALT_SIZE+1];
// per-candidate 64-byte crypt results (16 x 32-bit words), allocated in init()
static ARCH_WORD_32 (*crypt_out)[16];
/* Allocate per-candidate buffers; under OpenMP, scale the batch size. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	/* give every thread OMP_SCALE batches of work per crypt_all() call */
	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
 * Accept "[$krb18$]salt$hash" where hash is exactly CIPHERTEXT_LENGTH
 * lowercase hex digits running to end-of-string. Returns 1 if well-formed.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	p = strchr(p, '$');	/* strchr, not strstr: single-char separator */
	if (p == NULL)
		return 0;
	if (p - ciphertext > SALT_SIZE) /* check salt length */
		return 0;
	q = ++p;
	while (atoi16l[ARCH_INDEX(*q)] != 0x7F)
		q++;
	/* hex run must terminate the string and be exactly the hash length */
	return !*q && q - p == CIPHERTEXT_LENGTH;
}
// Canonicalize a hash line: prepend the "$krb18$" tag when it is missing,
// so both input forms listed in the file header compare equal.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + SALT_SIZE + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return ciphertext;
memcpy(out, FORMAT_TAG, TAG_LENGTH);
strnzcpyn(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + SALT_SIZE + 1);
return out;
}
/*
 * Extract the salt (the realm+principal text between the tag and the '$')
 * into a static buffer. Assumes a tagged ciphertext that passed valid().
 */
static void *get_salt(char *ciphertext)
{
	static char out[SALT_SIZE+1];
	char *p, *q;

	memset(out, 0, sizeof(out));
	p = ciphertext + TAG_LENGTH;
	q = strchr(p, '$');	/* separator guaranteed by valid() */
	/* memcpy with explicit length; avoids strncpy's non-termination quirk */
	memcpy(out, p, q - p);
	out[q - p] = 0;
	return out;
}
// Install the current salt; get_salt() returns a NUL-terminated string that
// fits in saved_salt (length was bounded in valid()).
static void set_salt(void *salt)
{
strcpy(saved_salt, salt);
}
/* Decode the hex digits after the '$' into BINARY_SIZE raw bytes. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext;
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	p = strchr(p, '$') + 1;	/* strchr, not strstr; valid() guarantees '$' */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
		          atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
// For each candidate password: derive a 32-byte key with
// PBKDF2-HMAC-SHA1(password, salt, 4096 iterations), then AES-256-encrypt
// the Kerberos constant below (two chained blocks) to produce the stored key.
static int crypt_all(int *pcount, struct db_salt *_salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char key[32], i;
AES_KEY aeskey;
#ifdef SSE_GROUP_SZ_SHA1
// SIMD path: run SSE_GROUP_SZ_SHA1 PBKDF2 derivations in one call
ARCH_WORD_32 Key[SSE_GROUP_SZ_SHA1][32/4];
int lens[SSE_GROUP_SZ_SHA1];
unsigned char *pin[SSE_GROUP_SZ_SHA1];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = Key[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)saved_salt, strlen(saved_salt), 4096, &(x.poutc), 32, 0);
#else
// scalar path: one PBKDF2-HMAC-SHA1 derivation for this candidate
pbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index]), (const unsigned char*)saved_salt, strlen(saved_salt), 4096, key, 32, 0);
#endif
i=0;
#ifdef SSE_GROUP_SZ_SHA1
for (; i < SSE_GROUP_SZ_SHA1; ++i) {
memcpy(key, Key[i], 32);
#endif
// two AES blocks: the second encrypts the first block's ciphertext,
// filling crypt_out[index+i] with the 32-byte result
AES_set_encrypt_key(key, 256, &aeskey);
AES_encrypt((unsigned char*)"kerberos{\x9b[+\x93\x13+\x93", (unsigned char*)(crypt_out[index+i]), &aeskey);
AES_encrypt((unsigned char*)(crypt_out[index+i]), (unsigned char*)&crypt_out[index+i][4], &aeskey);
#ifdef SSE_GROUP_SZ_SHA1
}
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (crypt_out[index][0] == *(ARCH_WORD_32*)binary)
return 1;
return 0;
}
// Full BINARY_SIZE comparison for a candidate that passed cmp_all().
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// Nothing beyond cmp_one() to verify: the full binary was already compared.
static int cmp_exact(char *source, int index)
{
return 1;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
// Return the candidate exactly as set_key() stored it.
static char *get_key(int index)
{
return saved_key[index];
}
// Hash-table helpers: successively wider bit-masks of the first crypt word.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
// Format descriptor wiring the functions above into John the Ripper.
struct fmt_main fmt_krb5_18 = {
{ /* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
kinit_tests
}, { /* methods */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{ /* binary_hash[] */
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{ /* get_hash[] */
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact,
}
};
#endif /* plugin stanza */
|
edist.c | // Licensed under a 3-clause BSD style license - see LICENSE
#include "common.h"
#include "math.h"
#include "gsl/gsl_math.h"
#include "gsl/gsl_deriv.h"
#include "stdio.h"
double P_THRESH=1e-4;
/*****************************************************************************/
// Plain electron distributions no normalization
// params : p, gamma_min, gamma_max
double powerlaw(double gamma, void *params){
double *p = (double*) params;
return pow(gamma, -p[0]);
}
// params : p, gamma_min, gamma_max
double powerlawexpcutoff(double gamma, void *params){
double *p = (double*) params;
return powerlaw(gamma, params) * exp(-gamma/p[2]);
}
// params : theta
double thermal(double gamma, void *params){
double *p = (double*) params;
return gamma*sqrt(gamma*gamma-1)*exp(-gamma/p[0]);
}
// params : kappa, kappa_width
double kappa(double gamma, void *params){
double *p = (double*) params;
return gamma*sqrt(gamma*gamma-1)*pow(1+(gamma-1)/(p[0]*p[1]),-p[0]-1);
}
//params : p1, p2, gamma_b, gamma_min, gamma_max
double bknpowerlaw(double gamma, void *params){
double *p = (double*) params;
double factor = pow(p[2], p[1]-p[0]);
return (gamma < p[2]) ? pow(gamma, -p[0]) : factor*pow(gamma, -p[1]);
}
//params : p1, p2, gamma_b, gamma_min, gamma_max
double bknpowerlawexpcutoff(double gamma, void *params){
double *p = (double*) params;
return bknpowerlaw(gamma, params) * exp(-gamma/p[4]);
}
/*****************************************************************************/
// Electron distribution normalizations
// Analytic integral of gamma^-p over [gamma_min, gamma_max]; switches to the
// logarithmic form when p is within P_THRESH of 1.
double powerlaw_norm(void* params){
    double *p = (double*) params;
    if (P_THRESH > fabs(p[0] - 1))
        return log(p[2]) - log(p[1]);
    return (pow(p[1], -p[0]+1) - pow(p[2], -p[0]+1)) / (p[0]-1);
}
// Integral of the broken power law: sum of the two segments, each with a
// logarithmic special case when its index is within P_THRESH of 1.
double bknpowerlaw_norm(void* params){
    double *p = (double*) params;
    double factor = pow(p[2], p[1]-p[0]); // continuity factor at the break
    double norm1, norm2;

    if (P_THRESH > fabs(p[0] - 1))
        norm1 = log(p[2]) - log(p[3]);
    else
        norm1 = (pow(p[3], -p[0]+1) - pow(p[2], -p[0]+1)) / (p[0]-1);

    if (P_THRESH > fabs(p[1] - 1))
        norm2 = factor * (log(p[4]) - log(p[2]));
    else
        norm2 = factor * (pow(p[2], -p[1]+1) - pow(p[4], -p[1]+1)) / (p[1]-1);

    return norm1 + norm2;
}
// Maxwell-Juettner normalization: theta * K2(1/theta), with theta = params[0]
// and K2 the modified Bessel function (BESSELK is presumably a project
// macro/wrapper -- confirm its definition).
double thermal_norm(void* params){
double *p = (double*) params;
return (p[0]*BESSELK(2, 1/p[0]));
}
// Pandya+ 2016, ApJ 822:34 / Sec. 3.3
double kappa_norm(void* params){
    double *p = (double*) params;
    double norm_low, norm_high;

    // high-width asymptote of the kappa normalization
    norm_high = (p[0]-2)*(p[0]-1)/(2* (p[0]*p[0]) * (p[1]*p[1]*p[1]));
    // low-width asymptote, built up left-to-right to keep rounding identical
    norm_low = pow(2/(M_PI* (p[0]*p[0]*p[0]) * (p[1]*p[1]*p[1])), 0.5);
    norm_low = norm_low * GAMMAF(p[0] + 1) / GAMMAF(p[0] - 0.5);

    // smooth bridging of the two asymptotes (exponent -0.7 per Pandya+ 2016)
    return pow(pow(norm_low, -0.7) + pow(norm_high, -0.7), 1/0.7);
}
/*****************************************************************************/
// Common interface for electron distributions - no normalization
// Evaluate the source's (unnormalized) electron distribution at gamma.
double eDist_s(double gamma, Source* source_t){
return source_t->d_func(gamma, (void*) source_t->params);
}
// Numerical derivative of the (unnormalized) distribution at gamma, using
// GSL central differences with a fixed step h = 1e-8.
double deDistdgam_s(double gamma, Source* source_t){
gsl_function F;
F.function = source_t->d_func;
F.params = (void*) source_t->params;
double err;
double res;
double h = 1e-8;
// err (GSL's error estimate) is computed but not propagated to callers
gsl_deriv_central(&F, gamma, h, &res, &err);
return res;
}
/*****************************************************************************/
// Electron distributions from gamma array. Full math.
// Fill res[0..sz) with the normalized distribution derivative at each gamma:
// res[i] = ne * d/dgamma f(gamma[i]) / norm. Always returns 0.
int deDistdgam(double *res, int sz, double *gamma, Source* source_t){
    const double norm = source_t->n_func((void*) source_t->params);
    int i;
    #pragma omp parallel for
    for (i = 0; i < sz; i++)
        res[i] = source_t->ne*deDistdgam_s(gamma[i], source_t)/norm;
    return 0;
}
// Fill res[0..sz) with the normalized distribution at each gamma:
// res[i] = ne * f(gamma[i]) / norm. Always returns 0.
int eDist(double *res, int sz, double *gamma, Source* source_t){
    const double norm = source_t->n_func((void*) source_t->params);
    int i;
    #pragma omp parallel for
    for (i = 0; i < sz; i++)
        res[i] = source_t->ne*eDist_s(gamma[i], source_t)/norm;
    return 0;
}
/*****************************************************************************/
|
Searching.202007271527.gather_top_m.subsearch.h | //
// Created by Zhen Peng on 7/27/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0;
edgei num_e_ = 0;
idi num_queries_ = 0;
uint64_t dimension_ = 0;
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr;
dataf *queries_load_ = nullptr;
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr;
uint64_t data_bytes_;
uint64_t neighbor_bytes_;
uint64_t vertex_bytes_;
// For multithreads
int num_threads_ = 1;
// int num_real_threads_ = 1;
// int num_threads_intra_query_ = 1;
// int num_threads_inter_query_ = 1;
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const;
// idi dimension)
static idi insert_into_queue(
std::vector<Candidate> &c_queue,
idi c_queue_top,
Candidate cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_size,
const idi queue_capacity,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
// idi insert_into_queue_nsg(
// std::vector< Candidate > &c_queue,
// idi c_queue_top,
// Candidate cand);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static void merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_list(
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
std::vector<Candidate> &set_L,
const idi L);
// idi merge_all_queues_para_array(
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// std::vector<Candidate> &set_L,
// const idi L);
idi merge_all_queues_para_array(
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_queue_base(
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi queue_base,
const int real_threads,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
void merge_two_consecutive_queues_in_place(
std::vector<Candidate> &two_queues,
const idi base_1,
// const idi &end_1,
const idi base_2,
const idi &length_2);
void merge_in_set_L(
std::vector<Candidate> &set_L,
const idi set_L_length,
const idi num_queues,
const idi local_queue_length);
distf selecting_top_L_seq(
std::vector<Candidate> &set_L,
const idi global_L,
// const idi local_L,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes);
void selecting_unchecked_top_M_seq(
const idi query_id,
const idi iter,
std::vector<Candidate> &set_L,
const std::vector<idi> &pointers_starts,
const idi value_M,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
const std::vector<idi> &local_queues_sizes,
std::vector<idi> &local_m_counts);
void gather_unchecked_top_M_seq(
const idi query_id,
const idi iter,
std::vector<Candidate> &set_L,
const std::vector<idi> &pointers_starts,
const idi value_M,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
const std::vector<idi> &local_queues_sizes,
std::vector<idi> &top_m_candidates,
idi &top_m_candidates_size,
std::vector<idi> &bound_subs);
// idi merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
// idi min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
// uint64_t count_add_to_queue_ = 0;
// uint64_t count_single_query_computation_ = 0;
// distf dist_min_ = 0;
// distf dist_max_ = 0;
double time_merge_ = 0;
double time_select_ = 0;
// double time_select_L_ = 0.0;
// double time_select_M_ = 0.0;
// double time_initialization_ = 0;
// double time_sequential_phase_ = 0;
// double time_parallel_phase_ = 0;
// double time_ending_ = 0.0;
// double time_assign_s_ = 0.0;
// double time_expand_ = 0.0;
// double time_pick_top_m_ = 0.0;
// double time_distance_computation_ = 0.0;
// double time_add_to_queue_ = 0.0;
// double time_insert_ = 0;
// double time_compare_minimum_ = 0;
// double time_memmove_ = 0;
// std::vector<double> time_memmove_list_;
// L3CacheMissRate profile_miss_rate;
// uint64_t number_local_elements_ = 0;
// std::vector<idi> L_ids_;
// std::vector<idi> M_ids_;
~Searching()
{
free(data_load_);
data_load_ = nullptr;
// free(queries_load_);
// _mm_free(data_load_);
free(queries_load_);
queries_load_ = nullptr;
// free(norms_);
// free(nsg_graph_indices_);
// free(nsg_graph_out_edges_);
free(opt_nsg_graph_);
opt_nsg_graph_ = nullptr;
}
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
const unsigned L) const;
// void prepare_candidate_queue_list(
// const float *query_load,
// std::vector<std::vector<efanna2e::Neighbor> > &retset_list,
// std::vector<boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<unsigned> &init_ids,
// const boost::dynamic_bitset<> &flags,
// unsigned batch_start,
// unsigned batch_size,
// unsigned L);
// void search_in_batch(
//// const float *query_load,
// size_t K,
// size_t L,
// unsigned batch_start,
// unsigned batch_size,
// std::vector< std::vector<Candidate> > &set_L_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<idi> &init_ids,
// const boost::dynamic_bitset<> &is_visited,
// std::vector<std::vector<idi> > &set_K_list);
void search_in_sequential(
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
// boost::dynamic_bitset<> &is_visited,
// boost::dynamic_bitset<> is_visited,
// std::vector<idi> &init_ids,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// void search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// idi get_out_degree(idi v_id) const
// {
// if (v_id < num_v_ - 1) {
// return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id];
// } else {
// return num_e_ - nsg_graph_indices_[v_id];
// }
// }
void search_with_top_m(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector< std::vector<idi> > &top_m_list);
void search_with_top_m_scale_m(
const PANNS::idi value_M_max,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
// void search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids);
// void search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited);
void search_with_top_m_in_batch(
PANNS::idi M,
PANNS::idi batch_start,
PANNS::idi batch_size,
PANNS::idi K,
PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list);
// void para_search_with_top_m_critical_area(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_no_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_yes_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
// void para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_in_array(
// void para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_by_sort(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &dest_offsets,
// const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L.
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v2(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_better_merge_v1(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
//// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0_0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_less_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_no_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds,
// const uint64_t computation_threshold);
// void para_search_with_top_m_merge_queues_scale_m_v0(
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
// std::vector<distf> &local_thresholds);
// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_scale_m_v2(
// const idi value_M_min,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_scale_m_v3(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m_no_merge(
const uint64_t computation_threshold,
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
const idi init_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_sequential_merge(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_nested_para(
const idi batch_start,
const idi batch_size,
const idi value_M_middle,
const idi value_M_max,
const idi K,
const idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list);
void subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
// void subsearch_top_m_for_one_iteration_lth(
// const distf bound_lth,
// const idi iter,
// idi &k_uc,
// const idi value_M,
// const idi query_id,
// const dataf *query_data,
// const idi L,
// std::vector<Candidate> &set_L,
// const idi set_L_start,
// idi &set_L_size,
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &count_distance_computation);
void subsearch_top_m_for_one_iteration_lth_mth(
const distf bound_lth,
// const idi top_m_position,
const idi iter,
idi &k_uc,
const idi local_m_count,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation,
double &time_pick_top_m,
uint64_t &count_add_to_queue,
double &time_distance_computation,
double &time_add_to_queue);
// void para_search_with_top_m_subsearch_v3(
// const idi local_M_max,
// const idi local_M_middle,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
//// const idi total_L,
//// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_subsearch_v4(
const idi local_M_max,
const idi local_M_middle,
const idi query_id,
const idi K,
const idi global_L,
const idi local_L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void subsearch_for_simple_search(
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi base_set_L,
idi &set_L_end,
// std::vector<uint8_t> &is_visited,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void para_simple_search_subsearch(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_distance_threshold_m(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi middle_iteration,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_myths(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
//// std::vector<uint8_t> &is_visited);
//// boost::dynamic_bitset<> &is_visited);
//// void para_prepare_init_ids(
//// std::vector<unsigned> &init_ids,
//// unsigned L) const;
// void para_search_with_top_m_in_batch_embarassing_para(
// const PANNS::idi M,
// const PANNS::idi batch_start,
// const PANNS::idi batch_size,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list);
// void test_neighbors_distance_to_father(
// const idi num_selected) const;
// void test_neighbors_normalized_distance_to_father(
// const idi num_selected) const;
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
* Input the data from the file.
* @param filename
*/
inline void Searching::load_data_load(char *filename)
{
    // Remember the dimensionality seen so far (0 if neither data nor
    // queries have been loaded yet) so we can cross-check afterwards.
    auto previous_dimension = dimension_;

    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);

    // Data and queries must agree on dimensionality.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: data dimension " << dimension_
                  << " is not equal to query dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input queries from the file.
* @param filename
*/
inline void Searching::load_queries_load(char *filename)
{
    // Remember the dimensionality seen so far (0 if neither data nor
    // queries have been loaded yet) so we can cross-check afterwards.
    auto previous_dimension = dimension_;

    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);

    // Queries and data must agree on dimensionality.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: query dimension " << dimension_
                  << " is not equal to data dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input the NSG graph from the file.
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
* @param filename
*/
inline void Searching::load_nsg_graph(char *filename)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
exit(EXIT_FAILURE);
}
fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
data_bytes_ = (1 + dimension_) * sizeof(dataf);
neighbor_bytes_ = (1 + width_) * sizeof(idi);
vertex_bytes_ = data_bytes_ + neighbor_bytes_;
opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
if (!opt_nsg_graph_) {
std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
exit(EXIT_FAILURE);
}
idi v_id = 0;
num_e_ = 0;
char *base_location = opt_nsg_graph_;
while (true) {
idi degree;
fin.read(reinterpret_cast<char *>(°ree), sizeof(unsigned));
if (fin.eof()) {
break;
}
num_e_ += degree;
// std::vector<idi> tmp_ngbrs(degree);
// fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));
// Norm and data
distf norm = compute_norm(data_load_ + v_id * dimension_);
// distf norm = compute_norm(v_id);
std::memcpy(base_location, &norm, sizeof(distf)); // Norm
memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
base_location += data_bytes_;
// Neighbors
memcpy(base_location, °ree, sizeof(idi)); // Number of neighbors
fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
// memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
base_location += neighbor_bytes_;
++v_id;
}
if (v_id != num_v_) {
std::cerr << "Error: NSG data has " << v_id
<< " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
exit(EXIT_FAILURE);
}
free(data_load_);
data_load_ = nullptr;
// ////////////////////////
// idi v_id = 0;
// num_e_ = 0;
// while (true) {
// idi degree;
// fin.read(reinterpret_cast<char *>(°ree), sizeof(unsigned));
// if (fin.eof()) {
// break;
// }
// num_e_ += degree;
//
// std::vector<idi> ngbrs(degree);
// fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
//// nsg_graph_.push_back(ngbrs);
//// tmp_edge_list.push_back(ngbrs);
// edge_list_.push_back(ngbrs);
// ++v_id;
// }
// if (v_id != num_v_) {
// std::cerr << "Error: NSG data has " << v_id
// << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
// exit(EXIT_FAILURE);
// }
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
/**
 * Compute recall@{1,5,10,20,50,100} over all queries.
 * For every query, each of the top-100 ground-truth IDs is searched for in
 * the top-100 returned IDs; a hit at position n_i credits every recall
 * bucket whose cutoff exceeds n_i. Each bucket is then normalized by
 * (cutoff * num_queries_).
 * @param true_nn_list ground-truth neighbor IDs, >= 100 per query
 * @param set_K_list   returned neighbor IDs, >= 100 per query
 * @param[out] recalls map from cutoff (1,5,...,100) to recall value
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    // Symmetric check for the result lists: the loops below index the first
    // 100 entries of set_K_list, which would be out of bounds if the search
    // was run with K < 100.
    if (set_K_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of results of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    recalls[1] = 0.0;
    recalls[5] = 0.0;
    recalls[10] = 0.0;
    recalls[20] = 0.0;
    recalls[50] = 0.0;
    recalls[100] = 0.0;
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] == true_id) {
                    if (n_i < 1) recalls[1] += 1;
                    if (n_i < 5) recalls[5] += 1;
                    if (n_i < 10) recalls[10] += 1;
                    if (n_i < 20) recalls[20] += 1;
                    if (n_i < 50) recalls[50] += 1;
                    if (n_i < 100) recalls[100] += 1;
                }
            }
        }
    }
    recalls[1] /= 1.0 * num_queries_;
    recalls[5] /= 5.0 * num_queries_;
    recalls[10] /= 10.0 * num_queries_;
    recalls[20] /= 20.0 * num_queries_;
    recalls[50] /= 50.0 * num_queries_;
    recalls[100] /= 100.0 * num_queries_;
}
/**
 * Single-threaded best-first NSG search for one query.
 * set_L is used as a fixed-capacity priority queue of L candidates kept
 * sorted ascending by distance; after the search converges, the IDs of the
 * first K candidates are written to set_K.
 * @param query_id  index of the query in queries_load_
 * @param K         number of results to export
 * @param L         queue capacity (search width), K <= L
 * @param set_L     scratch candidate queue, size >= L
 * @param init_ids  L initial candidate vertex IDs (see prepare_init_ids)
 * @param[out] set_K top-K result vertex IDs
 */
inline void Searching::search_in_sequential(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark initial candidates as visited so they are never re-inserted.
    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;

    // Warm the cache with the initial candidates' vertex records.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: [norm][data][degree][neighbors].
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    idi tmp_count = 0; // for debug

    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L; // Lowest queue position changed during this expansion.
        if (!top_cand.is_checked_) {
            ++tmp_count;
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Out-edge list: first idi is the degree, then the neighbor IDs.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Skip neighbors that cannot enter the top-L queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        // Resume from the lowest updated position so newly inserted closer
        // candidates are expanded before moving past them.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
    // Export the top-K result IDs.
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
//inline void Searching::search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
// BitVector is_visited(num_v_);
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
//// is_visited[init_ids[v_i]] = true;
// is_visited.atomic_set_bit(init_ids[v_i]);
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//// cache_miss_kernel.measure_stop();
//#pragma omp parallel for
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/**
* Prepare init_ids and flags, as they are constant for all queries.
* @param[out] init_ids
* @param L
*/
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        const unsigned L) const
{
    // Collect L distinct starting vertices shared by all queries:
    // first the enter point's neighbors, then deterministic filler IDs.
    boost::dynamic_bitset<> is_selected(num_v_);

    // The enter point's out-edges sit right after its norm + data section;
    // the first idi there is the out-degree.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
    for (idi e_i = 0; e_i < out_degree; ++e_i) {
        if (init_ids_end >= L) {
            break;
        }
        idi v_id = out_edges[e_i];
        if (is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
        init_ids[init_ids_end++] = v_id;
    }

    // If ep_'s neighbors are not enough, add other vertices: walk the IDs
    // following ep_ (wrapping around num_v_) instead of calling rand().
    idi tmp_id = ep_ + 1;
    while (init_ids_end < L) {
        tmp_id %= num_v_;
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
        init_ids[init_ids_end++] = v_id;
    }
}
// TODO: re-code in AVX-512
/**
 * Compute the squared L2 norm (sum of squares) of a vector using AVX.
 * @param data pointer to the vector's floats
 * @return sum of data[i]^2 over D elements, where D is dimension_ rounded
 *         up to the next multiple of 8
 *
 * NOTE(review): when dimension_ is not a multiple of 8, the remainder block
 * reads a full 8 floats at e_l, i.e. past dimension_ up to the rounded-up
 * size D. This assumes the buffer is padded (presumably zero-padded) to at
 * least D floats — confirm the allocation of data_load_ / opt_nsg_graph_.
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
{
    dataf result = 0;
// Accumulate sum(x*x) of 8 floats at addr into dest (unaligned load).
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);

    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension rounded up to a multiple of 8
    unsigned DR = D % 16;                // leftover after the 16-wide main loop (0 or 8)
    unsigned DD = D - DR;                // span handled by the unrolled main loop
    const float *l = data;
    const float *e_l = l + DD;           // start of the 8-float remainder block
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};

    sum = _mm256_load_ps(unpack);        // zero the accumulator lanes
    if (DR) { AVX_L2NORM(e_l, sum, l0); } // handle the trailing 8 floats first
    // Main loop: two 8-float lanes per iteration (16 floats).
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_store_ps(unpack, sum);
    // Horizontal reduction of the 8 accumulator lanes.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    return result;
}
/**
 * Distance surrogate between a base vector and the query, via AVX:
 * returns vertex_norm - 2 * <v_data, q_data>. The query's own squared
 * norm is omitted (it is constant per query), so the value is only
 * meaningful for comparing candidates of the same query — it is not the
 * true squared Euclidean distance.
 * Both buffers are read up to D = round_up(dimension_, 8) floats —
 * NOTE(review): assumes both are padded to D floats; verify the loader.
 * @param v_data       base (vertex) vector
 * @param q_data       query vector
 * @param vertex_norm  precomputed squared norm of the base vector
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
// Multiply-accumulate of one 8-float chunk from each vector (unaligned loads).
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension rounded up to a multiple of 8
    unsigned DR = D % 16;                // remainder chunk size (0 or 8)
    unsigned DD = D - DR;                // bulk length, multiple of 16
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    // Zero accumulator built by loading a zeroed aligned array.
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } // fold the 8-float remainder first
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    // Horizontal reduction of the 8 partial dot-product sums.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    result = -2 * result + vertex_norm; // ||v||^2 - 2<v,q>; ||q||^2 intentionally omitted
    return result;
}
//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue(
// std::vector<PANNS::Candidate> &queue,
// idi &queue_top,
// const idi queue_size,
// const PANNS::Candidate &cand)
//{
// assert(queue_size > 1);
// if (0 == queue_top) {
// queue[queue_top++] = cand;
// return 0;
// } else if (1 == queue_top) {
// if (queue[0] < cand) {
// queue[queue_top++] = cand;
// return 1;
// } else {
// queue[++queue_top] = queue[0];
// queue[0] = cand;
// return 0;
// }
// }
//
// if (queue[queue_top - 1] < cand) {
// if (queue_top < queue_size) {
// queue[queue_top++] = cand;
// }
// return queue_top;
// }
//
// idi r = insert_into_queue(
// queue,
// queue_top - 1,
// cand);
//// {//test
//// printf("r: %u"
//// "queue_top: %u "
//// "queue_size: %u\n",
//// r,
//// queue_top,
//// queue_size);
//// }
// return r;
//
//// /////////////////////////////////////////////////////////////
//// // Find the insert location
//// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
//// idi insert_loc = it_loc - queue.begin();
//// if (insert_loc == queue_size) {
//// return queue_size;
//// }
////
//// // Insert
////// if (queue_top == queue_size) {
////// // If full already
////// --queue_top;
////// }
//// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//// reinterpret_cast<char *>(queue.data() + insert_loc),
//// (queue_top - insert_loc) * sizeof(Candidate));
////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
////// queue.at(q_i) = queue.at(q_i - 1);
////// }
//// queue[insert_loc] = cand;
//// ++queue_top;
//// return insert_loc;
//}
// Unlike insert_into_queue, add_into_queue grows the queue by one element
// (bounded by queue_size; when full, the worst element is evicted).
// Returns the insertion index, or queue_size if cand was rejected.
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    if (queue_top == 0) {
        // Empty queue: cand becomes the sole element.
        queue[queue_top++] = cand;
        return 0;
    }
    // Binary-search the sorted live region for the insertion point.
    const idi loc = static_cast<idi>(
            std::lower_bound(queue.begin(), queue.begin() + queue_top, cand)
            - queue.begin());
    if (loc == queue_size) {
        // Worse than everything in a full queue: reject.
        return queue_size;
    }
    if (queue_top == queue_size) {
        // Full: drop the last (worst) element to make room.
        --queue_top;
    }
    // Shift the tail right by one slot and place cand.
    memmove(reinterpret_cast<char *>(queue.data() + loc + 1),
            reinterpret_cast<char *>(queue.data() + loc),
            (queue_top - loc) * sizeof(Candidate));
    queue[loc] = cand;
    ++queue_top;
    return loc;
}
// Variant of add_into_queue operating on a sub-queue stored at
// [queue_start, queue_start + queue_size) inside a larger array.
// queue_size is updated on success. Returns the insertion position
// relative to queue_start, or queue_capacity when cand is rejected
// (duplicate of the element at its slot, or full queue with cand worst).
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_size, // The insertion location starting from queue_start
        const idi queue_capacity, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    if (queue_size == 0) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Locate the first element not less than cand within the sub-queue.
    const auto pos = std::lower_bound(
            queue.begin() + queue_start, queue.begin() + queue_end, cand);
    const idi insert_loc = static_cast<idi>(pos - queue.begin());
    if (insert_loc == queue_end) {
        // cand is worse than every current element.
        if (queue_size >= queue_capacity) {
            return queue_capacity; // Full: reject.
        }
        queue[insert_loc] = cand; // Append at the tail.
        ++queue_size;
        return queue_size - 1;
    }
    if (pos->id_ == cand.id_) {
        // Duplicate of the element already at this rank: reject.
        return queue_capacity;
    }
    if (queue_size >= queue_capacity) {
        // Full: evict the worst so the shift below stays within capacity.
        --queue_size;
        --queue_end;
    }
    // Shift [insert_loc, queue_end) right by one and place cand.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
 * The queue is sorted ascending by distance; ties on distance are broken
 * by vertex ID (smaller ID takes the earlier slot).
 * @param[out] c_queue   sorted candidate queue
 * @param c_queue_top    number of live elements in c_queue
 * @param cand           candidate to insert
 * @return index where cand was placed, or c_queue_top if it stayed last.
 * NOTE(review): when cand is worse than every element, left == c_queue_top
 * and the final write targets c_queue[c_queue_top]; the vector must be
 * sized at least c_queue_top + 1 — confirm at call sites.
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) {
        // If the first: shift all elements right and put cand at the head.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // If tied with the last element on distance:
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;
        }
    }
    // Binary search: find the first element with a larger distance.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // If the distance is the same, walk left so equal-distance entries
    // stay ordered by ascending ID.
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ; // Distinct distance: the binary-search position is final.
    } else {
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }
    // Insert to left: shift the tail right by one and place cand.
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}
//inline void Searching::cand_pushes_ngbrs_into_queue(
// idi cand_id,
// const dataf *query_data,
// idi L,
// idi &new_k,
// boost::dynamic_bitset<> &is_visited,
// std::vector<Candidate> &set_L)
//{
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
//}
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
// Deprecated: cannot use std::set, because its element is constant.
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
//// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// std::set<Candidate> set_L;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// set_L.emplace(v_id, dist, false);
// }
//// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
//// Candidate &top_cand = set_L[k];
// std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k);
// unsigned nk = L;
// if (!top_cand->is_checked_) {
// top_cand->is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/* Function:
 * queue1_size is fixed.
 * Merge sorted queue2 into sorted queue1 while keeping queue1's size
 * constant: elements pushed past queue1's end are discarded, and
 * duplicates (matching id_) from queue2 are skipped.
 * Returns the lowest index (relative to queue1_start) where queue1
 * changed; queue1_size means queue1 was not changed at all.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // queue2's best is worse than queue1's worst: nothing to merge.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot can change: overwrite it directly.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }
    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }
    // Merge the rest of queue2 slot by slot; insert_i tracks the queue1
    // position being decided, q_i_1/q_i_2 are the merge cursors.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished traversing; the rest of queue2 is discarded.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
 * queue1_size should be updated.
 * queue1_length should be provided.
 * Merge sorted queue2 into sorted queue1, growing queue1 (queue1_size is
 * updated by reference) up to capacity queue1_length; anything beyond
 * capacity is discarded, and duplicates (matching id_) are skipped.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // queue2's best is worse than all of queue1: bulk-append as much
        // of queue2 as remaining capacity allows.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                         queue1_length - queue1_size :
                         queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return;
    }
    // Merge the remaining elements of queue2 one at a time.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: append the rest of queue2 up to capacity.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted: done.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
}
/**
 * Merge all per-thread local queues (list-of-vectors form) into set_L,
 * tree-style: in each of log2(size) rounds, queue bi merges into queue ai
 * in parallel. Threads beyond the largest power of two are folded in
 * sequentially afterwards; the last local queue then merges into set_L.
 * All local_queues_ends are reset to 0 on return.
 * @return lowest index in set_L that changed (L if set_L is unchanged).
 */
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    int size = 1 << (static_cast<idi>(log2(num_threads_))); // largest power of two <= num_threads_
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // ai is empty: steal bi's buffer instead of merging.
                local_queues_list[ai].swap(local_queues_list[bi]);
                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                continue;
            }
//            else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) {
//                local_queues_list[ai].swap(local_queues_list[bi]);
//                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
//            }
//            merge_two_queues_into_1st_queue_seq(
//                    local_queues_list[ai],
//                    0,
//                    local_queues_ends[ai],
//                    local_queues_list[bi],
//                    0,
//                    local_queues_ends[bi]);
            // Standard-library merge into a temporary, then truncate to L.
            idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
            std::vector<Candidate> tmp_queue(tmp_length);
            std::merge(
                    local_queues_list[ai].begin(),
                    local_queues_list[ai].begin() + local_queues_ends[ai],
                    local_queues_list[bi].begin(),
                    local_queues_list[bi].begin() + local_queues_ends[bi],
                    tmp_queue.begin());
            if (tmp_length > L) {
                tmp_queue.resize(L); // keep only the best L
                tmp_length = L;
            } else if (tmp_length < L) {
                tmp_queue.resize(L); // pad capacity up to L; tmp_length stays the live count
            }
            local_queues_list[ai].swap(tmp_queue);
            local_queues_ends[ai] = tmp_length;
//            {// Print queue a
//                printf("d: %u "
//                       "i: %u "
//                       "ai: %u "
//                       "local_queues_ends[%d]: %d\n",
//                       d,
//                       i,
//                       ai,
//                       ai,
//                       local_queues_ends[ai]);
//                for (idi i_q = 0; i_q < local_queues_ends[ai]; ++i_q) {
//                    printf("[%u]: "
//                           "id: %u "
//                           "dist: %f\n",
//                           i_q,
//                           local_queues_list[ai][i_q].id_,
//                           local_queues_list[ai][i_q].distance_);
//                }
//            }
        }
    }
    // Remain, prefix-sum-like merge: fold queues [size, num_threads_) in
    // sequentially, each into its successor.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi bi = i - 1;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                local_queues_list[ai].swap(local_queues_list[bi]);
                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                continue;
            }
//            else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) {
//                local_queues_list[ai].swap(local_queues_list[bi]);
//                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
//            }
//            merge_two_queues_into_1st_queue_seq(
//                    local_queues_list[ai],
//                    0,
//                    local_queues_ends[ai],
//                    local_queues_list[bi],
//                    0,
//                    local_queues_ends[bi]);
            idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
            std::vector<Candidate> tmp_queue(tmp_length);
            std::merge(
                    local_queues_list[ai].begin(),
                    local_queues_list[ai].begin() + local_queues_ends[ai],
                    local_queues_list[bi].begin(),
                    local_queues_list[bi].begin() + local_queues_ends[bi],
                    tmp_queue.begin());
            if (tmp_length > L) {
                tmp_queue.resize(L);
                tmp_length = L;
            } else if (tmp_length < L) {
                tmp_queue.resize(L);
            }
            local_queues_list[ai].swap(tmp_queue);
            local_queues_ends[ai] = tmp_length;
        }
    }
    // Merge into set_L: the final local queue carries the combined result.
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}
/* Function:
 * Use large local_queues_array as a concatenation of all queues
 * (segment q occupies [q * local_queue_length, ...) within set_L).
 * Tree-merge all per-thread segments; the last segment
 * ([num_queues - 1]) is the global top-L queue and keeps fixed size L.
 * Returns nk, the lowest index in the global queue that changed
 * (L if the global queue is unchanged).
 */
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const int num_queues = num_threads_;
    idi nk = L; // lowest insertion location into the global queue
    int size = 1 << (static_cast<idi>(log2(num_queues))); // largest power of two <= num_queues
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_queues - 1)) {
                // Ordinary segment: grow ai by bi's elements (capacity-bounded).
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // Global segment: fixed size L; track the lowest change index.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge: queues [size, num_queues) fold in
    // sequentially, each into its successor.
    if (size != num_queues) {
        for (int i = size; i < num_queues; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_queues - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends
    // Not do this for Collector Idea or Selecting Idea
    // (the global queue's end, the last entry, is deliberately kept).
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
/* Function:
 * When merge all queues (in an array, and [num_threads_ - 1] is the global queue),
 * the starting local is at [queue_base]
 * Variant of merge_all_queues_para_array that only merges the queues from
 * index queue_base onward using real_threads workers; the tail queues past
 * the power-of-two block fold in sequentially. Returns nk, the lowest
 * changed index in the global queue (L if unchanged).
 */
inline idi Searching::merge_all_queues_queue_base(
//        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<Candidate> &set_L,
//        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi queue_base,
        const int real_threads,
        const idi local_queue_length,
//        std::vector<Candidate> &set_L,
        const idi L)
{
    idi nk = L; // lowest insertion location into the global queue
    int size = 1 << (static_cast<idi>(log2(real_threads))); // largest power of two <= real_threads
//    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        idi by = 1 << (d + 1);
        idi i_bound = size + queue_base;
#pragma omp parallel for num_threads(real_threads)
        for (idi i = queue_base; i < i_bound; i += by) {
//        for (int i = 0; i < size; i += by) {
//            idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
//            idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                // Ordinary segment: grow ai by bi's elements (capacity-bounded).
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // Global segment: fixed size L; track the lowest change index.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge: the queues past the power-of-two block
    // fold in sequentially, each into its successor.
    if (size != real_threads) {
//    if (size != num_threads_) {
        for (int i = size + queue_base; i < num_threads_; ++i) {
//        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends (the global queue's end is deliberately kept).
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
inline void Searching::merge_two_consecutive_queues_in_place(
std::vector<Candidate> &two_queues,
const idi base_1,
// const idi &end_1,
const idi base_2,
const idi &length_2)
{
// idi tid = omp_get_thread_num();
idi index_1 = base_1;
idi index_2 = base_2;
const idi bound_2 = base_2 + length_2;
while (index_1 < index_2
&& index_2 < bound_2) {
Candidate e_1 = two_queues[index_1];
Candidate e_2 = two_queues[index_2];
if (e_1 < e_2) {
++index_1;
} else if (e_2 < e_1) {
// time_memmove_list_[tid] -= WallTimer::get_time_mark();
std::memmove(two_queues.data() + index_1 + 1,
two_queues.data() + index_1,
(index_2 - index_1) * sizeof(Candidate));
// time_memmove_list_[tid] += WallTimer::get_time_mark();
two_queues[index_1] = e_2;
++index_1;
++index_2;
} else { // Duplicate, but have no idea what to do right now
// time_memmove_list_[tid] -= WallTimer::get_time_mark();
std::memmove(two_queues.data() + index_1 + 1,
two_queues.data() + index_1,
(index_2 - index_1) * sizeof(Candidate));
// time_memmove_list_[tid] += WallTimer::get_time_mark();
index_1 += 2;
++index_2;
}
}
}
///* Function:
// * Merge all queues to the global queue, in a two-queue-merge way
// */
//inline idi Searching::merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// const idi global_queue_base = (num_queues - 1) * local_queue_length;
// std::vector<idi> queue_heads(num_queues, 0);
// idi queue_id_min;
//
//// bool is_finished = false;
// bool is_1st_selected = true;
// idi nk = L; // The highest location of insertion.
// {
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// if (0 == local_queues_ends[q_i]) {
// continue;
// }
// _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0);
// }
// }
// while (queue_heads[num_queues - 1] < L) {
//// time_compare_minimum_ -= WallTimer::get_time_mark();
// queue_id_min = min_all_queues_at_heads(
// set_L,
// queue_heads,
// local_queues_ends,
// local_queue_length,
// L);
//// time_compare_minimum_ += WallTimer::get_time_mark();
// if (queue_id_min != num_queues - 1) { // Not in the global queue
//// time_insert_ -= WallTimer::get_time_mark();
// insert_one_element_at(
// set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length],
// set_L,
// queue_heads[num_queues - 1],
// global_queue_base,
// L);
//// time_insert_ += WallTimer::get_time_mark();
// if (is_1st_selected) { // Get the highest inserting location
// is_1st_selected = false;
// nk = queue_heads[num_queues - 1];
// }
// ++queue_heads[queue_id_min];
// }
// ++queue_heads[num_queues - 1];
// }
//
// // Reset local_queues_ends
// std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
// return nk;
//}
///* Function:
// * Find the minimum among queues at their head locations
// */
//inline idi Searching::min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// idi min_queue_id = num_queues - 1;
// Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length];
//
// for (idi q_i = 0; q_i < num_queues - 1; ++q_i) {
// if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished
// continue;
// }
// const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length];
// if (ele < min_candidate) {
// min_candidate = ele;
// min_queue_id = q_i;
// } else if (ele.id_ == min_candidate.id_) { // Redundant element
// ++queue_heads[q_i];
// }
// }
//
// return min_queue_id;
//}
/*
 * Pairwise (tree-style) in-place merge of all local queues held
 * consecutively inside set_L.
 *
 * Layout: set_L contains num_queues consecutive sub-queues, each with
 * capacity local_queue_length; set_L_length is the total count of valid
 * elements. The first 2^floor(log2(num_queues)) queues are merged pairwise
 * in log2size rounds (each round parallelized with OpenMP); any remaining
 * queues beyond that power-of-two prefix are then merged sequentially into
 * queue 0.
 *
 * NOTE(review): relies on merge_two_consecutive_queues_in_place() merging
 * the run starting at base_a with [base_b, base_b + length_b) — confirm
 * its exact contract at its definition.
 */
inline void Searching::merge_in_set_L(
        std::vector<Candidate> &set_L,
        const idi set_L_length,
        const idi num_queues,
        const idi local_queue_length)
{
    // Largest power of two not exceeding num_queues.
    idi size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        // Length of each run being merged in round d.
        const idi merge_length = (local_queue_length << d);
        idi by = 1 << (d + 1); // Stride between the bases of merged pairs.
        // Parallel for
#pragma omp parallel for
        for (idi i = 0; i < size; i += by) {
//            idi a = i + (1 << d) - 1;
//            idi b = i + (1 << (d + 1)) - 1;
            idi a = i;            // Left queue index of the pair.
            idi b = i + (1 << d); // Right queue index of the pair.
            idi base_a = a * local_queue_length;
            idi base_b = b * local_queue_length;
            if (base_a >= set_L_length || base_b >= set_L_length) {
                continue; // Right run holds no valid elements; nothing to merge.
            }
            idi length_b;
            if (a + by < size) {
                length_b = merge_length;
            } else { // The last one
                if (size == num_queues) {
                    // Tail pair: only the elements actually present.
                    length_b = set_L_length - base_b;
                } else {
                    length_b = merge_length;
                }
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
    // Sequentially fold the leftover queues (beyond the power-of-two
    // prefix) into queue 0.
    if (size != num_queues) {
        for (idi i = size; i < num_queues; ++i) {
            idi a = 0;
            idi b = i;
            idi base_a = a;
            idi base_b = b * local_queue_length;
            if (base_b >= set_L_length) {
                continue;
            }
            idi length_b;
            if (b != num_queues - 1) {
                length_b = local_queue_length;
            } else {
                // The very last queue may be only partially filled.
                length_b = set_L_length - base_b;
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
}
/*
* 7/5/2020-20:27
* Every queue keeps only elements which can be ordered in the top-L globally.
* local_queues_lengths records the end location for all queues
*/
/*
 * K-way selection over the heads of all local queues: repeatedly pick the
 * globally smallest unconsumed candidate until global_L elements are ranked.
 * Ties on distance are broken by the smaller vertex id.
 *
 * On return, local_queues_sizes[q_i] holds how many elements of queue q_i
 * belong to the global top-L; the return value is the distance of the last
 * (global_L-th) selected element.
 *
 * Fix over the original: bound_lth, min_q_i, and min_id were declared
 * without initializers; bound_lth could be *returned* indeterminate when
 * global_L == 0 or when all queues exhaust on the first pass, and min_id
 * was read while indeterminate in the distance-tie branch — both undefined
 * behavior. They are now zero/FLT_MAX-initialized; selection logic is
 * otherwise unchanged.
 */
inline distf Searching::selecting_top_L_seq(
        std::vector<Candidate> &set_L,
        const idi global_L,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes)
{
    std::vector<idi> pointers(num_queues, 0); // Per-queue consumption cursor.
    distf bound_lth = FLT_MAX; // Defined even on pathological early exit.
    idi rank = 0;              // Number of elements selected so far.
    bool is_finished = false;
    distf min_dist = FLT_MAX;
    idi min_q_i = 0;
    idi min_id = 0;
    while (rank < global_L) {
        is_finished = true;
        min_dist = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            if (pointers[q_i] >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            idi sub = pointers[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                // Equal distance: prefer the smaller vertex id.
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (is_finished) {
            {//test
                printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n",
                        rank,
                        global_L);
            }
            break;
        }
        bound_lth = min_dist;
        ++pointers[min_q_i];
        ++rank;
    }
    // Shrink every queue's recorded size to exactly its contribution
    // to the global top-L.
    std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin());
    return bound_lth;
}
/*
* 7/24/2020-10:08
* Record for every queue the position that contains the top-M unchecked vertices.
* So the total expanded vertices should still be M, which means the computation should
* be the same with merging idea.
*/
/*
 * 7/24/2020-10:08
 * Record for every queue the position that contains the top-M unchecked vertices.
 * So the total expanded vertices should still be M, which means the computation
 * should be the same as the merging idea.
 *
 * Starting from pointers_starts, repeatedly finds the globally smallest
 * *unchecked* candidate across all queues (skipping checked entries by
 * advancing the per-queue cursor) and counts, per queue, how many of the
 * top-M unchecked vertices it contributed (local_m_counts).
 *
 * Fix over the original: min_q_i and min_id were declared without
 * initializers, and min_id was read while indeterminate in the
 * distance-tie branch (undefined behavior). Both are now zero-initialized;
 * the selection logic is otherwise unchanged.
 */
inline void Searching::selecting_unchecked_top_M_seq(
        const idi query_id,
        const idi iter,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &pointers_starts,
        const idi value_M,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        const std::vector<idi> &local_queues_sizes,
        std::vector<idi> &local_m_counts)
{
    std::vector<idi> pointers(pointers_starts); // Per-queue scan cursor.
//    std::vector<idi> pointers(num_queues, 0);
    std::fill(local_m_counts.begin(), local_m_counts.end(), 0);
    idi rank = 0; // Number of unchecked vertices selected so far.
    bool is_finished = true;
    distf min_dist = FLT_MAX;
    idi min_q_i = 0;
    idi min_id = 0;
    while (rank < value_M) {
        min_dist = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi &pointer = pointers[q_i];
            idi sub = pointer + local_queues_starts[q_i];
//            {//test
//                if (133 == query_id &&
//                    3 == iter &&
//                    321341 == set_L[sub].id_) {
//                    printf("(%u %f)\n",
//                           set_L[sub].id_, set_L[sub].distance_);
//                }
//            }
            // Skip already-checked candidates at the head of this queue.
            while (pointer < local_queues_sizes[q_i]
                    && set_L[sub].is_checked_) {
                ++pointer;
                ++sub;
            }
            if (pointer >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                // Equal distance: prefer the smaller vertex id.
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!is_finished) {
            is_finished = true;
            ++pointers[min_q_i];
            ++rank;
            ++local_m_counts[min_q_i];
        } else {
            break; // Every queue exhausted before M unchecked were found.
        }
    }
//    std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin());
}
/*
* 7/27/2020-15:41
* Gather the top-M unchecked vertices from local queues.
*/
/*
 * 7/27/2020-15:41
 * Gather the top-M unchecked vertices from local queues.
 *
 * Like selecting_unchecked_top_M_seq(), but instead of counting per-queue
 * contributions it copies the selected vertex ids into top_m_candidates
 * (appending from top_m_candidates_size) and marks them checked in set_L.
 * The final per-queue cursors are written into bound_subs.
 *
 * Fix over the original: min_q_i and min_id were declared without
 * initializers, and min_id was read while indeterminate in the
 * distance-tie branch (undefined behavior). Both are now zero-initialized;
 * the gathering logic is otherwise unchanged.
 */
inline void Searching::gather_unchecked_top_M_seq(
        const idi query_id,
        const idi iter,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &pointers_starts,
        const idi value_M,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        const std::vector<idi> &local_queues_sizes,
        std::vector<idi> &top_m_candidates,
        idi &top_m_candidates_size,
        std::vector<idi> &bound_subs)
{
    std::vector<idi> pointers(pointers_starts); // Per-queue scan cursor.
//    std::vector<idi> pointers(num_queues, 0);
//    std::fill(local_m_counts.begin(), local_m_counts.end(), 0);
//    idi rank = 0;
    bool is_finished = true;
    distf min_dist = FLT_MAX;
    idi min_q_i = 0;
    idi min_id = 0;
    while (top_m_candidates_size < value_M) {
        min_dist = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi &pointer = pointers[q_i];
            idi sub = pointer + local_queues_starts[q_i];
            // Skip already-checked candidates at the head of this queue.
            while (pointer < local_queues_sizes[q_i]
                    && set_L[sub].is_checked_) {
                ++pointer;
                ++sub;
            }
            if (pointer >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                // Equal distance: prefer the smaller vertex id.
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!is_finished) {
            is_finished = true;
            idi sub = local_queues_starts[min_q_i] + pointers[min_q_i];
            top_m_candidates[top_m_candidates_size++] = set_L[sub].id_;
            set_L[sub].is_checked_ = true; // Checked
            ++pointers[min_q_i];
//            ++rank;
//            ++local_m_counts[min_q_i];
        } else {
            break; // Every queue exhausted before M unchecked were found.
        }
    }
//    std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin());
    std::copy(pointers.begin(), pointers.end(), bound_subs.begin());
}
/*
 * Sequential top-M greedy (best-first) search on the NSG graph.
 *
 * Seeds the size-L priority queue set_L with init_ids, then repeatedly:
 * (1) picks up to M unchecked candidates starting at index k, (2) expands
 * their out-neighbors from the packed graph layout (opt_nsg_graph_:
 * per-vertex [norm | data | out_degree | edge ids]), inserting any new
 * neighbor closer than the current L-th distance, and (3) restarts the
 * scan from the highest insertion position nk. Terminates when the first
 * unchecked candidate index k reaches L. The ids of the best K candidates
 * are written to set_K.
 */
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark all seed vertices as visited.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of each vertex record is its norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Highest (smallest-index) insertion position this round.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list lives right after the vertex data: degree, then ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current L-th best; skip.
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the highest insertion point if it precedes the last
        // expanded candidate, otherwise move past the expanded region.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
    // Emit the ids of the K best candidates.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
/*
 * Top-M greedy search with a geometrically growing M.
 *
 * Identical traversal to search_with_top_m(), except:
 *  - M starts at 1 and doubles after each iteration up to value_M_max,
 *  - the scratch top_m_candidates buffer and the is_visited bitset are
 *    caller-provided (reused across queries); is_visited is reset before
 *    returning.
 * The ids of the best K candidates are written to set_K.
 */
inline void Searching::search_with_top_m_scale_m(
        const PANNS::idi value_M_max,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark all seed vertices as visited.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of each vertex record is its norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // Expansion width; doubles each iteration up to value_M_max.
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Highest (smallest-index) insertion position this round.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list lives right after the vertex data: degree, then ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current L-th best; skip.
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the highest insertion point if it precedes the last
        // expanded candidate, otherwise move past the expanded region.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        if (M < value_M_max) {
            M <<= 1; // Double the expansion width for the next iteration.
        }
    }
    // Emit the ids of the K best candidates.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
        is_visited.reset();
    }
}
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids)
//// std::vector<idi> &set_K)
//{
// dist_max_ = -FLT_MAX;
// dist_min_ = FLT_MAX;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// For histogram
// for (idi i_l = 0; i_l < L; ++i_l) {
// distf dist = set_L[i_l].distance_;
// {// For distance range
// if (dist > dist_max_) {
// dist_max_ = dist;
// }
// if (dist < dist_min_) {
// dist_min_ = dist;
// }
// }
// }
// }
// }
//
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i].id_;
//// }
//}
//
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
// const idi loc_range = L / 3;
//
//
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
//// {// For histogram
//// const distf dist_range = dist_max_ - dist_min_;
//// printf("iter:%u\n", 0);
//// for (idi i_l = 0; i_l < L; ++i_l) {
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
//// }
//// }
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// std::vector<idi> range_count(3, 0);
// idi zero_inserted_count = 0;
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// }
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//// {//test
//// printf("top_m_candidates_ends: %u\n", top_m_candidates_end);
//// }
// {
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//
// uint64_t count_neighbors = 0;
// uint64_t count_inserted = 0;
// std::vector<idi> locs_to_count(M);
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
//
// count_neighbors += out_degree;
// idi num_inserted = 0;
//
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// ++num_inserted;
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
//// {
//// printf("c_i: %u "
//// "count: %u "
//// "loc_inserted: %u\n",
//// c_i,
//// num_inserted,
//// r);
//// }
// if (r < nk) {
// nk = r;
// }
// {
// ++range_count[r / loc_range];
// }
// }
// {
// if (0 == num_inserted) {
// ++zero_inserted_count;
// }
// locs_to_count[c_i] = num_inserted;
// count_inserted += num_inserted;
// }
//// {
//// printf("c_i: %u "
//// "num_inserted: %u\n",
//// c_i,
//// num_inserted);
//// }
// }
//// {
//// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) {
//// locs_to_count[c_i] = 0;
//// }
//// printf("iter:%u\n", tmp_count);
//// for (idi c_i = 0; c_i < M; ++c_i) {
//// printf("%u %u\n", c_i, locs_to_count[c_i]);
//// }
//// }
//// {//test
//// idi sum = 0;
//// for (const idi ct : range_count) sum += ct;
//// printf("tmp_count: %u "
//// "k: %u "
//// "actual_M: %u %.1f%% "
//// "zero_ins: %u %.1f%% "
//// "1/3: %u %.1f%% "
//// "2/3: %u %.1f%% "
//// "3/3: %u %.1f%%\n",
//// tmp_count,
//// k,
//// top_m_candidates_end, 100.0 * top_m_candidates_end / M,
//// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end,
//// range_count[0], 100.0 * range_count[0] / sum,
//// range_count[1], 100.0 * range_count[1] / sum,
//// range_count[2], 100.0 * range_count[2] / sum);
//// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {
// printf("query:%uiter: %u "
// "#neighbors: %lu "
// "#inserted: %lu "
// "ratio: %.2f%%\n",
// query_id, tmp_count,
// count_neighbors,
// count_inserted,
// 100.0 * count_inserted / count_neighbors);
// }
//// {// For histogram
////// const auto it_min = std::min_element(set_L.begin(), set_L.end());
////// const auto it_max = std::max_element(set_L.begin(), set_L.end());
////// const distf dist_min = it_min->distance_;
////// const distf dist_max = it_max->distance_;
////// const distf dist_min = it_min->distance_ - 1.0;
////// const distf dist_max = it_max->distance_ + 1.0;
//// const distf dist_range = dist_max_ - dist_min_;
////// const distf dist_range = dist_max - dist_min;
////// {
////// printf("it_min->distance_: %f dist_min: %f\n",
////// it_min->distance_, dist_min);
////// }
////// const distf dist_range = it_max->distance_ - it_min->distance_;
//// printf("iter:%u\n", tmp_count);
//// for (idi i_l = 0; i_l < L; ++i_l) {
////// printf("%f\n", set_L[i_l].distance_);
////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0);
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0);
//// }
//// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
// if (query_id == 3) {
// exit(1);
// }
//}
//
//// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//// boost::dynamic_bitset<> is_visited(num_v_); // Bit array
// BitVector is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = true;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
///// Backup
//inline void Searching::search_with_top_m(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
////// DEPRECATED: the is_visited array cannot be shared among threads.
//inline void Searching::search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
// Batched top-M greedy best-first search on the packed graph opt_nsg_graph_.
//
// Runs the top-M search for batch_size queries (queries_load_ rows
// batch_start .. batch_start + batch_size - 1) simultaneously. Each query
// keeps its own length-L candidate queue set_L_list[q_i], but the expensive
// neighbor expansion is amortized across the batch: every round, the up-to-M
// unchecked candidates selected by each still-active query are merged
// (deduplicated) into one joint queue, so a vertex selected by several
// queries has its adjacency list read only once per round.
//
// Vertex record layout in opt_nsg_graph_ (as used by the code below):
//   [dataf norm][vertex data ... data_bytes_][idi out_degree][idi out_edges...]
//
// Parameters:
//   M          - max number of candidates each query expands per round.
//   batch_start- index of this batch's first query in queries_load_.
//   batch_size - number of queries processed together.
//   K          - number of nearest-neighbor ids written per query (K <= L
//                expected; the output loop guards with c_i < L).
//   L          - search queue length.
//   set_L_list - per-query candidate queues, each at least L long (scratch;
//                overwritten here).
//   init_ids   - L starting vertex ids, shared by all queries in the batch.
//   set_K_list - output: set_K_list[batch_start + q_i] receives the top-K ids
//                for query q_i of the batch.
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    // One visited bitmap per query: visited state must stay per-query even
    // though expansion is shared.
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));

    // Mark the shared init_ids as visited for every query.
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }
    // Initialize set_L_list: compute each query's distance to every init
    // vertex, then sort each queue ascending by distance.
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                // First dataf of the vertex record is its precomputed norm.
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }
    {
        // Capacity M * batch_size: each query contributes at most M candidates
        // per round, and the joint queue is cleared every round.
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        // Membership bitmap used to deduplicate the joint queue across queries.
        boost::dynamic_bitset<> is_in_joint_queue(num_v_);
//        std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
//        std::vector<idi> cands_query_ids_ends(num_v_, 0);
        // cand_id -> list of local query ids that selected it this round.
        // (batch_size * M is a bucket-count hint, not an element count.)
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        // Compact list of queries whose cursor has not yet reached L.
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;
        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Phase 1: build this round's joint queue.
            // Traverse every unfinished query's queue and select up to M
            // unchecked candidates each, recording per candidate which
            // queries selected it.
            for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    // last_ks ends the round as the lowest (largest-index)
                    // position this query selected from.
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue, once per
                    // round (deduplicated via the is_in_joint_queue bitmap).
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished
            // Phase 2: expand each joint candidate exactly once, pushing its
            // neighbors into the queue of every query that selected it.
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                // Adjacency list lives at offset data_bytes_ in the vertex
                // record: out-degree followed by the out-edge ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
                // Traverse cand_id's neighbors
//                idi &q_i_bound = cands_query_ids_ends[cand_id];
//                for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
//                    idi q_local_id = query_local_ids[q_i];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
//                    // Traverse cand_id's neighbors
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        // Prune: no better than the current worst in the queue.
                        if (dist > set_L[L-1].distance_) {
                            continue;
                        }
//                        if (dist >= set_L[L-1].distance_) {
//                            continue;
//                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        // Track the highest (smallest-index) insertion point
                        // per query; it decides where the cursor resumes.
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                // cand_id is fully expanded this round; drop its selector list.
                cands_query_ids.erase(cand_id);
//                q_i_bound = 0; // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; // Clear joint_queue
            // Phase 3: advance each query's cursor and rebuild the list of
            // unfinished queries.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    // A new candidate landed at or above the lowest selected
                    // position: resume scanning from there.
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }
    // Write out the final top-K vertex ids for every query in the batch.
    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
////
//    {//test
//        for (idi q_i = 0; q_i < batch_size; ++q_i) {
//            printf("query: %u\n", q_i + batch_start);
//            for (idi c_i = 0; c_i < K; ++c_i) {
//                printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
//            }
//        }
//    }
}
//inline void Searching::para_search_with_top_m_critical_area(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_no_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_yes_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// uint64_t count_visited = 0;
//
//// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// ++count_visited;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//// ++count_visited;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
//// {
//// printf("query_id: %u "
//// "count_visited: %lu %f%%\n",
//// query_id,
//// count_visited,
//// 100.0 * count_visited / num_v_);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
//// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// {// text
//// if (query_id == 4 &&
//// tmp_count == 5) {
//// // Print local queues
//// for (int t_i = 0; t_i < num_threads_; ++t_i) {
////// idi start_i = t_i * local_queue_length;
//// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//// printf("t[%u][%u]: "
//// "id: %u "
//// "dist: %f\n",
//// t_i, q_i,
//// local_queues_list[t_i][q_i].id_,
//// local_queues_list[t_i][q_i].distance_);
//// }
//// }
//// printf("----------\n");
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// printf("----------\n");
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_list(
// local_queues_list,
// local_queues_ends,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[0],
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// {//test
//// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("tmp_count: %u "
//// "set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// tmp_count,
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// }
////
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//// {
//// exit(1);
//// }
//// {//test
////
////// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
////// exit(1);
////// }
//// }
//}
//
////// Using local queue and then sequential merge.
//inline void Searching::para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {
//// printf("tmp_count: %u "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//
//// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
//// idi r;
////#pragma omp critical
//// {
//// r = insert_into_queue(set_L, L, cand);
//// if (r < nk) {
//// nk = r;
//// }
//// }
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
//// const idi local_queue_length = L;
//// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
//// std::vector<idi> local_queues_ends(num_threads_, 0);
////// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// // Merge. Merge all queues in parallel.
//// {
//// if (num_threads_ > 1) {
//// idi r = merge_all_queues_para(
//// local_queues_list,
//// local_queues_ends,
//// set_L,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// } else {
//// if (local_queues_ends[0]) {
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[0],
//// 0,
//// local_queues_ends[0]);
//// local_queues_ends[0] = 0;
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// }
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
//inline void Searching::para_search_with_top_m_merge_queues_in_array(
//inline void Searching::para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited)
//// std::vector<uint8_t> &is_visited)
//// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = 1;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// idi min_index = L - 1;
// distf min_1st = set_L[min_index].distance_;
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// const idi local_queue_start = tid * local_queue_length;
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// { // Sequential edition
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//// }
//// { // __ATOMIC_SEQ_CST edition
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//// }
//// {// Acquire and Release edition
//// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) {
//// continue;
//// }
//// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE);
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//
// if (dist > min_1st) {
// continue;
// } else if (min_index > 0) {
// // Inserted, so min_1st needs update
// if (dist > set_L[min_index - 1].distance_) {
// min_1st = dist;
// if (min_index < L - 1) {
// ++min_index;
// }
// } else {
// min_1st = set_L[--min_index].distance_;
// }
//// min_1st = set_L[--min_index].distance_;
// }
//
//// if (dist > set_L[L-1].distance_) {
//// continue;
//// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
//// local_queues_list,
// local_queues_array,
// local_queues_ends,
// local_queue_length,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[0],
// local_queues_array,
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// // Merge Sequentially
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_seq_fixed(
//// set_L,
//// 0,
//// L,
////// local_queues_list[tid],
////// 0,
//// local_queues_array,
//// tid * local_queue_length,
//// local_queues_ends[tid]);
////// L + 1);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
/*
 * 5/7/2020-15:14
 * Use a single thread to scale M up until it reaches value_M_middle,
 * then switch to multiple threads.
 */
// Top-M best-first search on the NSG graph for one query, producing the K
// nearest candidate ids in set_K.
//
// Queue layout: set_L is one flat array that packs (num_threads_ - 1)
// thread-local queues of capacity local_queue_length, followed by the
// "global" queue of capacity L starting at offset base_set_L. The size of
// local queue t is local_queues_ends[t]; the last entry,
// local_queues_ends[num_threads_ - 1], is the global queue's size.
//
// Two phases:
//   1. Single-threaded: expand the best M unchecked candidates per
//      iteration, doubling M each iteration, until M reaches value_M_middle.
//   2. Multi-threaded: same expansion under OpenMP; thread 0 inserts
//      directly into the global queue, every other thread inserts into its
//      own local queue, and all queues are merged back into the global
//      queue after each iteration via merge_all_queues_para_array().
//
// Parameters:
//   value_M_middle     - M threshold at which to switch to the parallel phase
//   value_M_max        - upper bound used to clamp the expansion width M
//   query_id           - index of the query vector inside queries_load_
//   K                  - number of result ids written into set_K
//   L                  - capacity of the global candidate queue
//   set_L              - backing array for all queues (see layout above)
//   init_ids           - ids of the L starting candidates
//   set_K              - output buffer for the best K candidate ids
//   local_queue_length - capacity of each thread-local queue
//   base_set_L         - offset of the global queue inside set_L
//                        (= (num_threads_ - 1) * local_queue_length)
//   local_queues_ends  - per-queue current sizes; zeroed before returning
//   top_m_candidates   - scratch buffer for the selected top-M ids
//                        (assumed capacity >= value_M_max — TODO confirm at caller)
//   is_visited         - per-vertex visited bitmap; reset before returning
//
// NOTE(review): in the parallel phase, threads write is_visited[nb_id]
// without synchronization; distinct bits of a boost::dynamic_bitset share
// machine words, so this is a data race. It looks like an intentional
// best-effort filter (worst case a vertex is expanded more than once) —
// confirm this is the intended trade-off.
inline void Searching::para_search_with_top_m_merge_queues_middle_m(
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    // Mark every starting candidate visited up front so the expansion
    // loops below never re-insert them.
//#pragma omp parallel for
    for (idi c_i = 0; c_i < L; ++c_i) {
        is_visited[init_ids[c_i]] = 1;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the vertex records of all starting candidates.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Each vertex record stores its norm first, then the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the global queue, which lives at offset base_set_L.
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = L; // Global queue starts full.
//    time_initialization_ += WallTimer::get_time_mark();
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // Expansion width; grows geometrically below.
//    time_sequential_phase_ -= WallTimer::get_time_mark();
    { // Single thread
        while (k < L && M < value_M_middle) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
//            Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L; // Lowest queue position any new candidate landed at.
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list sits after the data block: degree, then ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Prune anything worse than the current worst entry of
                    // the (always full) global queue.
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            // Resume scanning from the earliest insertion point, or just
            // past the last expanded candidate if nothing improved.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                // NOTE(review): when value_M_max is not a power of two, M
                // can overshoot it for one iteration before being clamped —
                // confirm this is acceptable.
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();
//    time_parallel_phase_ -= WallTimer::get_time_mark();
//    uint64_t tmp_count_add_to_queue = 0;
//    double tmp_time_pick_top_m = 0;
//    double tmp_time_distance_computation = 0;
//    double tmp_time_add_to_queue = 0.0;
    { // Multiple Threads
        while (k < L) {
//            time_expand_ -= WallTimer::get_time_mark();
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
//            time_pick_top_m_ -= WallTimer::get_time_mark();
//            Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
//            time_pick_top_m_ += WallTimer::get_time_mark();
            idi nk = L; // Only thread 0 updates nk inside the parallel loop.
            // Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation)
//                    reduction(+ : tmp_count_add_to_queue) \
//                    reduction(+ : tmp_time_pick_top_m) \
//                    reduction(+ : tmp_time_distance_computation) \
//                    reduction(+ : tmp_time_add_to_queue)
//        for (int tid = 0; tid < num_threads_; ++tid) {
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
//                tmp_time_pick_top_m -= WallTimer::get_time_mark();
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
//                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
//                for (idi n_i = 0; n_i < out_degree; ++n_i) {
//                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//                }
//                tmp_time_pick_top_m += WallTimer::get_time_mark();
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
//                    tmp_time_distance_computation -= WallTimer::get_time_mark();
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized read/write of shared
                        // bitmap across threads (see function header).
                        if (is_visited[nb_id]) {
//                            tmp_time_distance_computation += WallTimer::get_time_mark();
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//                    tmp_time_distance_computation += WallTimer::get_time_mark();
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
//                    ++tmp_count_add_to_queue;
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
//                    tmp_time_pick_top_m -= WallTimer::get_time_mark();
//                    tmp_time_add_to_queue -= WallTimer::get_time_mark();
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
//                    tmp_time_add_to_queue += WallTimer::get_time_mark();
//                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                }
            }
//            time_add_to_queue_ += tmp_time_add_to_queue;
//            tmp_time_add_to_queue = 0;
//        }
//        time_distance_computation_ += tmp_time_distance_computation;
//        tmp_time_distance_computation = 0;
//        time_pick_top_m_ += tmp_time_pick_top_m;
//        tmp_time_pick_top_m = 0;
//        count_add_to_queue_ += tmp_count_add_to_queue;
//        tmp_count_add_to_queue = 0;
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
//            time_expand_ += WallTimer::get_time_mark();
//        // Merge. Merge all queues in parallel.
            {
                // Fold every thread-local queue into the global queue; the
                // returned r is the smallest changed global-queue position.
                time_merge_ -= WallTimer::get_time_mark();
                if (num_threads_ > 1) {
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
                time_merge_ += WallTimer::get_time_mark();
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();
//    time_ending_ -= WallTimer::get_time_mark();
    // Emit the ids of the K best candidates from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
        // Leave the shared scratch state clean for the next query.
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    time_ending_ += WallTimer::get_time_mark();
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}
// Variant of para_search_with_top_m_merge_queues_middle_m with two extra
// controls: the initial queue is seeded with only init_size candidates
// (instead of a full L), and the whole search stops once the number of
// distance computations for this query exceeds computation_threshold.
// The queue layout in set_L is identical: (num_threads_ - 1) local queues
// of capacity local_queue_length, then the global queue at base_set_L.
//
// Parameters (only those that differ from the sibling function):
//   computation_threshold - budget of distance computations for this query;
//                           both phases stop when the running count
//                           (init + sequential + parallel) exceeds it
//   init_size             - number of entries of init_ids used to seed the
//                           global queue (presumably init_size <= L — TODO
//                           confirm at caller)
//
// Outputs and resets are the same as the sibling: set_K receives the best
// K ids, and is_visited / local_queues_ends are cleared before returning.
//
// NOTE(review): same benign-looking data race on is_visited in the
// parallel phase as in the sibling function (bits share words in
// boost::dynamic_bitset) — confirm intended.
inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge(
        const uint64_t computation_threshold,
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        const idi init_size,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    // Per-phase computation counters; only count_single_query_computation
    // gates the loops, the others are kept for the (commented-out) report.
    uint64_t count_single_query_computation = 0;
    uint64_t count_init_computation = 0;
    uint64_t count_seq_computation = 0;
    uint64_t count_par_computation = 0;
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    {
        // Mark the seed candidates visited so they are never re-inserted.
#pragma omp parallel for
        for (idi c_i = 0; c_i < init_size; ++c_i) {
//        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the seed vertices' records.
#pragma omp parallel for
    for (idi v_i = 0; v_i < init_size; ++v_i) {
//    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < init_size; i++) {
//    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: norm first, then the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation_;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    count_init_computation += tmp_count_computation;
    count_single_query_computation += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the seeded prefix of the global queue.
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + init_size);
//            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = init_size;
//    local_queues_ends[num_threads_ - 1] = L;
//    time_initialization_ += WallTimer::get_time_mark();
//    time_sequential_phase_ -= WallTimer::get_time_mark();
//    std::vector<idi> top_m_candidates(M);
    // Alias for the global queue's size; unlike the sibling function it can
    // be smaller than L here (queue starts with only init_size entries).
    idi &global_queue_size = local_queues_ends[num_threads_ - 1];
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // Expansion width; grows geometrically below.
    { // Single thread
        while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
//            Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L; // Lowest queue position any new candidate landed at.
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list sits after the data block: degree, then ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Prune against the worst entry currently in the queue.
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            global_queue_size,
//                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_seq_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            // Resume from the earliest insertion point, or just past the
            // last expanded candidate if nothing improved.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                // NOTE(review): M may overshoot value_M_max for one
                // iteration when value_M_max is not a power of two.
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();
//    time_parallel_phase_ -= WallTimer::get_time_mark();
    { // Multiple Threads
        while (k < L and count_single_query_computation <= computation_threshold) {
//        while (k < L) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d "
//                       "k: %u "
//                       "global_queue_size: %u\n",
//                        tmp_count,
//                        k,
//                        global_queue_size);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
//            Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L; // Only thread 0 updates nk inside the parallel loop.
            // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized read/write of shared
                        // bitmap across threads (see function header).
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                global_queue_size,
//                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_par_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            // Merge. Merge all queues in parallel.
            {
                // Despite the function's "_no_merge" name, local queues are
                // still folded into the global queue every iteration; the
                // returned r is the smallest changed global-queue position.
                if (num_threads_ > 1) {
//                    idi r = merge_all_queues_queue_base(
//                            set_L,
//                            local_queues_ends,
//                            queue_base,
//                            real_threads,
//                            local_queue_length,
//                            L);
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[base_set_L].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                            tmp_count, set_L[i_l + base_set_L].distance_);
////                            tmp_count, set_L[i_l + base_set_L].distance_ - top_dist);
//                }
//            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();
    // Emit the ids of the K best candidates from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
        // Leave the shared scratch state clean for the next query.
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
//    {//test
//        printf("count_single: %lu "
//               "ct_init: %lu "
//               "ct_seq: %lu "
//               "ct_par: %lu\n",
//                count_single_query_computation,
//                count_init_computation,
//                count_seq_computation,
//                count_par_computation);
//    }
}
///*
// * 6/15/2020-14:40
// * Queues merging together to the global queue
// */
//inline void Searching::para_search_with_top_m_merge_queues_sequential_merge(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// if (num_threads_ == 2) {
//// printf("tmp_count: %d "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// {//test
//// for (idi q_i = 0; q_i < num_threads_; ++q_i) {
//// if (0 == local_queues_ends[q_i]) {
//// continue;
//// }
//// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) {
//// printf("tmp_count: %u "
//// "q_i: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// q_i,
//// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_);
//// }
//// }
//// }
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_all_together_in_sequential(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
// if (r < nk) {
// nk = r;
// }
//// {//test
//// printf("tmp_count: %u "
//// "r: %u "
//// "last_k: %u\n",
//// tmp_count,
//// r,
//// last_k);
//// for (idi l_i = 0; l_i < L; ++l_i) {
//// printf("tmp_count: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_);
//// }
//// }
// }
//
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/19/2020:
// * Intra-query + Inter-query
// */
//inline void Searching::para_search_with_top_m_nested_para(
// const idi batch_start,
// const idi batch_size,
// const idi value_M_middle,
// const idi value_M_max,
// const idi K,
// const idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length;
// std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
// std::vector< std::vector<idi> > &top_m_candidates_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list)
//{
// {// Initialize is_visited flag array
//#pragma omp parallel for num_threads(num_threads_inter_query_)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// auto &is_visited = is_visited_list[q_i];
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
// }
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// uint64_t tmp_count_total_computation = 0;
//#pragma omp parallel for num_threads(num_threads_inter_query_) reduction(+ : tmp_count_total_computation)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// idi query_id = batch_start + q_i;
// auto &set_L = set_L_list[q_i];
// auto &local_queues_ends = local_queues_ends_list[q_i];
// auto &is_visited = is_visited_list[q_i];
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
////#pragma omp parallel for
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_intra_query_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// auto &top_m_candidates = top_m_candidates_list[q_i];
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_intra_query_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
// count_distance_computation_ += tmp_count_total_computation;
// tmp_count_total_computation = 0;
//
// auto &set_K = set_K_list[query_id];
//
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//// {
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: (%u %f)\n",
//// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_);
//// }
//// if (0 == batch_start) {
//// exit(1);
//// }
//// }
//}
/*
* 6/22/2020-21:30
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
idi M = 1; // value of M
while (k < local_L) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
local_L,
set_L,
set_L_start,
set_L_size,
local_top_m_candidates,
is_visited,
local_count_distance_computation);
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
// {//test
// printf("set_L_start: %u "
// "local_count_distance_computation: %lu\n",
// set_L_start,
// local_count_distance_computation);
// }
}
//// Backup
//inline void Searching::subsearch_with_top_m(
// const idi value_M_max,
// const idi query_id,
// const idi local_L,
// std::vector<Candidate> &set_L,
// const idi base_set_L,
// idi &set_L_end,
// std::vector<idi> &local_top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &local_count_distance_computation)
//{
// const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi iter = 0;
// idi M = 1; // value of M
//
// while (k < local_L) {
// ++iter;
// // Select M candidates
// idi last_k = local_L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = local_L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) {
// idi cand_id = local_top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++local_count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// set_L_end,
// local_L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// local_top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//}
/*
* 7/6/2020-23:17
* Subsearch only 1 iteration using top-m
*/
inline void Searching::subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation)
{
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// {//test
// M_ids_.push_back(set_L[index_set_L].id_);
// }
}
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > set_L[set_L_size - 1 + set_L_start].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
idi r = add_into_queue(
set_L,
set_L_start,
set_L_size,
L,
cand);
if (r < nk) {
nk = r;
}
}
}
// top_m_candidates_end = 0; // Clear top_m_candidates
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
// {//test
// for (idi l_i = 0; l_i < set_L_size; ++l_i) {
// L_ids_.push_back(set_L[set_L_start + l_i].id_);
// }
// std::sort(L_ids_.begin(), L_ids_.end());
// std::sort(M_ids_.begin(), M_ids_.end());
// for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) {
// printf("query_id: %u "
// "iter: %u "
// "M[%u]: "
// "%u\n",
// query_id,
// iter,
// m_i,
// M_ids_[m_i]);
// }
// M_ids_.clear();
// for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) {
// printf("query_id: %u "
// "iter: %u "
// "L[%u]: "
// "%u\n",
// query_id,
// iter,
// l_i,
// L_ids_[l_i]);
// }
// L_ids_.clear();
// }
}
///*
// * One more parameter for distance bound
// */
//inline void Searching::subsearch_top_m_for_one_iteration_lth(
// const distf bound_lth,
// const idi iter,
// idi &k_uc,
// const idi value_M,
// const idi query_id,
// const dataf *query_data,
// const idi L,
// std::vector<Candidate> &set_L,
// const idi set_L_start,
// idi &set_L_size,
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &count_distance_computation)
//{
// // Select M candidates
// idi top_m_candidates_end = 0;
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
// idi index_set_L = c_i + set_L_start;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > bound_lth) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// idi r = add_into_queue(
// set_L,
// set_L_start,
// set_L_size,
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
//}
/*
 * 7/24/2020-10:53
 * Subsearch for one iteration, using the queue's L-th (worst) value as the
 * distance bound; local_m_count bounds how many local top-M vertices are
 * expanded in this iteration.
 */
inline void Searching::subsearch_top_m_for_one_iteration_lth_mth(
        const distf bound_lth,
//        const idi top_m_position,
        const idi iter,
        idi &k_uc,
        const idi local_m_count,
        const idi query_id,
        const dataf *query_data,
        const idi L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &count_distance_computation,
        double &time_pick_top_m,
        uint64_t &count_add_to_queue,
        double &time_distance_computation,
        double &time_add_to_queue)
{
    // Instrumented variant of subsearch_top_m_for_one_iteration: performs one
    // top-M expansion over the queue segment
    // set_L[set_L_start .. set_L_start + set_L_size) while accumulating
    // wall-clock time into time_pick_top_m / time_distance_computation /
    // time_add_to_queue, and counting distance evaluations
    // (count_distance_computation) and queue insertions (count_add_to_queue).
    //
    // NOTE(review): bound_lth is currently unused — the active pruning test
    // below compares against the segment's own last element; the bound_lth
    // comparison is commented out. iter and query_id are referenced only by
    // the disabled debug code.
    //
    // Mutates: k_uc (advanced to the first possibly-unchecked position),
    // set_L / set_L_size (via add_into_queue), top_m_candidates (scratch),
    // is_visited, and every counter/timer reference parameter.
//    {//test
//        printf("query_id: %u "
//               "iter: %u "
//               "tid: %u \n",
//               query_id,
//               iter,
//               omp_get_thread_num());
//    }
    // Select M candidates
    idi top_m_candidates_end = 0;
    idi last_k = L;  // queue position of the last candidate selected below
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//    for (idi c_i = k_uc; c_i < top_m_position; ++c_i) {
    time_pick_top_m -= WallTimer::get_time_mark();
    // Pick up to local_m_count unchecked candidates, scanning from k_uc.
    for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) {
        idi index_set_L = c_i + set_L_start;
        if (set_L[index_set_L].is_checked_) {
            continue;
        }
        last_k = c_i; // Record the location of the last candidate selected.
        set_L[index_set_L].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//        {//test
//            M_ids_.push_back(set_L[index_set_L].id_);
//        }
    }
    time_pick_top_m += WallTimer::get_time_mark();
    idi nk = L;  // lowest queue position any insertion lands at this round
    // Push M candidates' neighbors into the queue.
    for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
        // Reading the adjacency list is billed to time_pick_top_m here.
        time_pick_top_m -= WallTimer::get_time_mark();
        idi cand_id = top_m_candidates[c_i];
//        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
//        for (idi n_i = 0; n_i < out_degree; ++n_i) {
//            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//        }
        time_pick_top_m += WallTimer::get_time_mark();
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            time_distance_computation -= WallTimer::get_time_mark();
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                if (is_visited[nb_id]) {
                    // Close the timing bracket before skipping this neighbor.
                    time_distance_computation += WallTimer::get_time_mark();
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            // Vertex record: precomputed norm first, then the vector data.
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;
            ++count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            time_distance_computation += WallTimer::get_time_mark();
            // Prune against the segment's current last (worst) element —
            // presumably the segment is kept sorted ascending by distance by
            // add_into_queue; confirm there.
            if (dist > set_L[set_L_start + set_L_size - 1].distance_) {
//            if (dist > bound_lth) {
                continue;
            }
            ++count_add_to_queue;
            Candidate cand(nb_id, dist, false);
//            time_pick_top_m -= WallTimer::get_time_mark();
            time_add_to_queue -= WallTimer::get_time_mark();
            idi r = add_into_queue(
                    set_L,
                    set_L_start,
                    set_L_size,
                    L,
                    cand);
            if (r < nk) {
                nk = r;
            }
            time_add_to_queue += WallTimer::get_time_mark();
//            time_pick_top_m += WallTimer::get_time_mark();
        }
    }
    // Advance k_uc: resume from the earliest insertion if it landed at or
    // before the last selected candidate; otherwise continue just past it.
    if (nk <= last_k) {
        k_uc = nk;
    } else {
        k_uc = last_k + 1;
    }
}
///*
// * 7/26/2020-15:41
// * L-th and M-th Selection.
// * Seq-Par Phases: when M is 1 and 2, do sequential searching;
// * When M is equal and larger than 4, do parallel searching.
// * It's for load-balance issue.
// */
//inline void Searching::para_search_with_top_m_subsearch_v3(
// const idi local_M_max,
// const idi local_M_middle,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
//// const idi total_L,
//// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited)
//{
// time_initialization_ -= WallTimer::get_time_mark();
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < global_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < global_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi id_i = 0; id_i < global_L; ++id_i) {
// idi v_id = init_ids[id_i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
// }
// local_queues_sizes[0] = global_L;
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// std::sort(set_L.begin(), set_L.begin() + global_L);
// }
// time_initialization_ += WallTimer::get_time_mark();
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// std::sort(
//// set_L.begin(),
//// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_sizes[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// } else { // Multiple threads
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
// idi local_M = 1;
// idi iter = 0;
// std::vector<idi> ks(num_queues, 0);
//
// time_sequential_phase_ -= WallTimer::get_time_mark();
// {// Sequential Search for M = 1, 2.
// idi &k = ks[0];
// while (k < global_L && local_M < local_M_middle) {
// ++iter;
// subsearch_top_m_for_one_iteration(
// iter,
// k,
// local_M,
// query_id,
// query_data,
// global_L,
// set_L,
// 0,
// local_queues_sizes[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Double M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
// }
// }
// }
// time_sequential_phase_ += WallTimer::get_time_mark();
//
// time_parallel_phase_ -= WallTimer::get_time_mark();
// distf bound_lth = set_L[global_L - 1].distance_;
// {// Parallel Search for M >= 4, or local_M_middle
// time_assign_s_ -=WallTimer::get_time_mark();
// {// Assign elements from Queue[0] to others
// idi dst_i = 1;
// for (idi e_i = 1; e_i < global_L; ++e_i) {
// idi dest_sub = e_i % num_queues;
// if (0 == dest_sub) {
// set_L[dst_i++] = set_L[e_i];
// } else {
// set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i];
// }
// }
// local_queues_sizes[0] = dst_i;
// }
// std::fill(ks.begin(), ks.end(), 0);
//
//
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
// time_assign_s_ +=WallTimer::get_time_mark();
//
// double tmp_time_pick_top_m = 0;
// uint64_t tmp_count_add_to_queue = 0;
// uint8_t not_finished = 1;
// double tmp_time_distance_computation = 0;
// double tmp_time_add_to_queue = 0;
// while (true) {
// time_expand_ -= WallTimer::get_time_mark();
// not_finished = 0;
// ++iter;
//#pragma omp parallel for reduction(+ : tmp_count_computation) \
// reduction(+ : tmp_time_pick_top_m) \
// reduction(+ : tmp_count_add_to_queue) \
// reduction(+ : tmp_time_distance_computation) \
// reduction(+ : tmp_time_add_to_queue)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// tmp_time_pick_top_m -= WallTimer::get_time_mark();
// idi L_value = q_i == 0 ? global_L : local_L;
// idi &k = ks[q_i];
// idi &local_queue_size = local_queues_sizes[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
// idi local_m_count = local_m_counts[q_i];
//// if (local_M < num_queues && !local_m_count) {
//// local_m_count = 1;
//// }
// tmp_time_pick_top_m += WallTimer::get_time_mark();
// if (!local_m_count) {
// continue;
// }
// not_finished = 1;
// const idi local_queue_start = local_queues_starts[q_i];
//
// subsearch_top_m_for_one_iteration_lth_mth(
// bound_lth,
// iter,
// k,
// local_m_count,
// query_id,
// query_data,
// L_value,
// set_L,
// local_queue_start,
// local_queue_size,
// local_top_m_candidates,
// is_visited,
// tmp_count_computation,
// tmp_time_pick_top_m,
// tmp_count_add_to_queue,
// tmp_time_distance_computation,
// tmp_time_add_to_queue);
// }
// time_add_to_queue_ += tmp_time_add_to_queue;
// tmp_time_add_to_queue = 0;
// time_distance_computation_ += tmp_time_distance_computation;
// tmp_time_distance_computation = 0;
// count_add_to_queue_ += tmp_count_add_to_queue;
// tmp_count_add_to_queue = 0;
// time_pick_top_m_ += tmp_time_pick_top_m;
// tmp_time_pick_top_m = 0;
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// time_expand_ += WallTimer::get_time_mark();
// if (!not_finished) {
// break;
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// time_select_ -= WallTimer::get_time_mark();
//#pragma omp parallel sections
// {
//#pragma omp section
// {// Setecting and update local_queues_lengths
//// time_select_L_ -= WallTimer::get_time_mark();
// bound_lth = selecting_top_L_seq(
// set_L,
// global_L,
//// local_L,
// num_queues,
// local_queues_starts,
// local_queues_sizes);
//// time_select_L_ += WallTimer::get_time_mark();
// }
//#pragma omp section
// {
//// time_select_M_ -= WallTimer::get_time_mark();
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
//// time_select_M_ += WallTimer::get_time_mark();
// }
// }
// time_select_ += WallTimer::get_time_mark();
//// {//test
//// printf("query_id: %u "
//// "iter: %u",
//// query_id,
//// iter);
//// printf(" local_queues_sizes:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_queues_sizes[i]);
//// }
//// printf(" local_m_counts:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_m_counts[i]);
//// }
//// printf(" ks:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", ks[i]);
//// }
//// printf("\n");
//// }
// }
// }
// time_parallel_phase_ += WallTimer::get_time_mark();
// }
//
//// time_merge_ -= WallTimer::get_time_mark();
// time_ending_ -= WallTimer::get_time_mark();
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_id;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_sizes[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_starts[q_i];
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// 0,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// const idi local_queue_size = local_queues_sizes[q_i];
// idi sub = pointer[q_i] + local_queues_starts[q_i];
//
// while (pointer[q_i] < local_queue_size
// && set_L[sub].id_ == last_id) {
// ++pointer[q_i];
// ++sub;
// }
// if (pointer[q_i] >= local_queue_size) {
// continue;
// }
// is_finished = false;
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// k_i,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// ++k_i;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
// std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0);
// }
//
// time_ending_ += WallTimer::get_time_mark();
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
* 7/27/2020-15:33
* Same with v3, but gather top-m vertices together
*/
/*
 * Parallel top-M best-first search (v4): like v3, but the unchecked top-M
 * vertices are gathered into one shared list and expanded by all threads.
 *
 * Phases:
 *   1. Initialization: seed set_L with init_ids, compute distances, sort.
 *   2. Sequential phase (one thread) while local_M < local_M_middle.
 *   3. Parallel phase: scatter the global queue round-robin into per-thread
 *      local queues, then iterate gather-expand-select until convergence.
 *   4. K-way merge of the sorted local queues into set_K (duplicates skipped).
 *   5. Reset scratch state (is_visited, local queue sizes) for the next query.
 *
 * Interface is unchanged; fixes applied relative to the previous revision:
 *   - The duplicate-skipping merge now updates last_id after every emitted
 *     result (previously it stayed at set_K[0], so only duplicates of the
 *     first result were skipped; cf. para_simple_search_subsearch).
 *   - When all queues are exhausted mid-round, the merge breaks out instead
 *     of writing one stale entry into set_K.
 *   - min_q_i/min_id/min_sub are zero-initialized to avoid reading
 *     uninitialized values (UB) in the degenerate all-queues-empty case.
 */
inline void Searching::para_search_with_top_m_subsearch_v4(
        const idi local_M_max,
        const idi local_M_middle,
        const idi query_id,
        const idi K,
        const idi global_L,
        const idi local_L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // Mark initial candidates visited.
        // Cannot use OpenMP here: is_visited is a bit array, so concurrent
        // writes to neighboring bits would race.
        for (idi c_i = 0; c_i < global_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < global_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all initial candidates; store them in set_L.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi id_i = 0; id_i < global_L; ++id_i) {
            idi v_id = init_ids[id_i];
            // Vertex record layout: [norm][data...] at v_id * vertex_bytes_.
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
        }
        local_queues_sizes[0] = global_L;
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        std::sort(set_L.begin(), set_L.begin() + global_L);
    }

    // Searching
    if (num_threads_ == 1) { // Single thread: plain top-M subsearch.
        subsearch_with_top_m(
                local_M_max,
                query_id,
                global_L,
                set_L,
                0,
                local_queues_sizes[0],
                top_m_candidates,
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    } else { // Multiple threads
        const dataf *query_data = queries_load_ + query_id * dimension_;
        const idi num_queues = num_threads_;
        idi local_M = 1;
        idi iter = 0;

        {// Sequential phase for M = 1, 2, ..., < local_M_middle.
            idi k = 0;
            while (k < global_L && local_M < local_M_middle) {
                ++iter;
                subsearch_top_m_for_one_iteration(
                        iter,
                        k,
                        local_M,
                        query_id,
                        query_data,
                        global_L,
                        set_L,
                        0,
                        local_queues_sizes[0],
                        top_m_candidates,
                        is_visited,
                        tmp_count_computation);
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                {// Double M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
            }
        }

        // Pruning bound: current L-th best distance in the global queue.
        distf bound_lth = set_L[global_L - 1].distance_;
        {// Parallel phase for M >= local_M_middle.
            {// Assign elements from Queue[0] round-robin to the local queues.
                idi dst_i = 1; // set_L[0] stays in Queue[0].
                for (idi e_i = 1; e_i < global_L; ++e_i) {
                    idi dest_sub = e_i % num_queues;
                    if (0 == dest_sub) {
                        set_L[dst_i++] = set_L[e_i];
                    } else {
                        set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i];
                    }
                }
                local_queues_sizes[0] = dst_i;
            }
            idi top_m_candidates_size = 0;
            std::vector<idi> ks(num_queues, 0);    // First unchecked index per queue.
            std::vector<idi> nks(num_queues);      // Lowest insertion position per queue this round.
            std::vector<idi> bound_ks(num_queues); // Per-queue scan bound from the gather step.

            while (true) {
                ++iter;
                // Gather up to local_M unchecked top vertices across all queues.
                gather_unchecked_top_M_seq(
                        query_id,
                        iter,
                        set_L,
                        ks,
                        local_M,
                        num_queues,
                        local_queues_starts,
                        local_queues_sizes,
                        top_m_candidates,
                        top_m_candidates_size,
                        bound_ks);
                if (!top_m_candidates_size) {
                    break; // No unchecked candidates remain: converged.
                }
                std::fill(nks.begin(), nks.end(), global_L);
                // Expand the gathered top-M vertices.
                // schedule(static, 1) keeps thread t writing only queue t.
#pragma omp parallel for schedule(static, 1) reduction(+ : tmp_count_computation)
                for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
                    idi tid = omp_get_thread_num();
                    const idi set_L_start = local_queues_starts[tid];
                    idi &set_L_size = local_queues_sizes[tid];
                    idi &nk = nks[tid];
                    // Queue 0 is the (larger) global queue.
                    idi L_value = tid == 0 ? global_L : local_L;
                    idi cand_id = top_m_candidates[c_i];
                    // Adjacency list layout: [out_degree][neighbor ids...].
                    idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                    idi out_degree = *out_edges++;
                    // Expand cand_id's neighbors.
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        { // Sequential edition
                            if (is_visited[nb_id]) {
                                continue;
                            }
                            is_visited[nb_id] = 1;
                        }
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
                        ++tmp_count_computation;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        if (dist > bound_lth) {
                            continue; // Worse than the global L-th best: prune.
                        }
                        Candidate cand(nb_id, dist, false);
                        idi r = add_into_queue(
                                set_L,
                                set_L_start,
                                set_L_size,
                                L_value,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
                top_m_candidates_size = 0;
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                // Rewind each queue's scan position to its lowest insertion point.
                for (idi q_i = 0; q_i < num_queues; ++q_i) {
                    if (nks[q_i] < bound_ks[q_i]) {
                        ks[q_i] = nks[q_i];
                    } else {
                        ks[q_i] = bound_ks[q_i];
                    }
                }
                time_select_ -= WallTimer::get_time_mark();
                {// Select the new global L-th best distance as the pruning bound.
                    bound_lth = selecting_top_L_seq(
                            set_L,
                            global_L,
                            num_queues,
                            local_queues_starts,
                            local_queues_sizes);
                }
                time_select_ += WallTimer::get_time_mark();
                {// Scale M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
            }
        }
    }

    {// Return the results to set_K: K-way merge of the sorted local queues,
     // skipping duplicate ids (the same vertex may sit in several queues).
        std::vector<idi> pointer(num_threads_, 0);
        // Get the first (overall best) candidate; ties broken by smaller id.
        distf min_dist = FLT_MAX;
        idi min_q_i = 0; // Zero-init: avoid UB reads if every queue is empty.
        idi min_id = 0;
        idi min_sub = 0;
        idi last_id;
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_sizes[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        set_K[0] = set_L[min_sub].id_;
        ++pointer[min_q_i];
        last_id = set_K[0];

        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_size = local_queues_sizes[q_i];
                idi sub = pointer[q_i] + local_queues_starts[q_i];
                // Skip duplicates of the id emitted in the previous round.
                while (pointer[q_i] < local_queue_size
                       && set_L[sub].id_ == last_id) {
                    ++pointer[q_i];
                    ++sub;
                }
                if (pointer[q_i] >= local_queue_size) {
                    continue;
                }
                is_finished = false;
                distf tmp_dist = set_L[sub].distance_;
                idi tmp_id = set_L[sub].id_;
                if (tmp_dist < min_dist) {
                    min_dist = tmp_dist;
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                } else if (tmp_dist == min_dist && tmp_id < min_id) {
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                }
            }
            if (is_finished) {
                // All queues exhausted this round: stop without writing a
                // stale duplicate from the previous min_sub into set_K.
                break;
            }
            set_K[k_i] = set_L[min_sub].id_;
            ++pointer[min_q_i];
            // Track the most recently emitted id so later duplicates are also
            // skipped (matches the dedup logic in para_simple_search_subsearch).
            last_id = set_K[k_i];
            ++k_i;
        }
    }

    {// Reset per-query scratch state.
        is_visited.reset();
        // Queue 0's size is re-established at the next initialization.
        std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0);
    }
}
/*
* 6/27/2020-12:33
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_for_simple_search(
        const idi query_id,
        const idi local_L,
        std::vector<Candidate> &set_L,
        const idi base_set_L,
        idi &set_L_end,
//        std::vector<uint8_t> &is_visited,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &local_count_distance_computation)
{
    // Best-first (top-1) search over the queue slice
    // set_L[base_set_L .. base_set_L + set_L_end): repeatedly pick the best
    // unchecked candidate, expand its neighbors, and insert improvements back
    // into the slice, until the first local_L entries are all checked.
    // Caller guarantees the slice is sorted and is_visited is pre-seeded.
    const dataf *query_data = queries_load_ + query_id * dimension_;
//    idi local_top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi iter = 0;
//    idi M = 1; // value of M
    while (k < local_L) {
        ++iter;
        // Select the top-1 unchecked candidate
        idi top_1;
        idi last_k = local_L; // Sentinel: stays local_L if nothing unchecked.
        // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
        for (idi c_i = k; c_i < set_L_end; ++c_i) {
            idi index_set_L = c_i + base_set_L;
            if (set_L[index_set_L].is_checked_) {
                continue;
            }
            top_1 = set_L[index_set_L].id_;
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[index_set_L].is_checked_ = true;
//            local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
            break;
        }
        if (last_k == local_L) {
            // No unchecked candidate found: search on this slice is done.
            break;
        }
        idi nk = local_L; // Lowest insertion position seen this iteration.
        // Push top-1' neighbors into the queue.
        idi cand_id = top_1;
        // Prefetch the adjacency list, then each neighbor's data record.
        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
        for (idi n_i = 0; n_i < out_degree; ++n_i) {
            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
        }
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                // NOTE(review): this test-and-set is not atomic; assumes each
                // thread works a disjoint slice or benign duplicate visits.
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            // Vertex record layout: [norm][data...] (presumably; see init code).
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;
            ++local_count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
                // Worse than the current worst in the slice: prune.
                continue;
            }
            Candidate cand(nb_id, dist, false);
            // Thread 0 maintains the "global" queue
            idi r = add_into_queue(
                    set_L,
                    base_set_L,
                    set_L_end,
                    local_L,
                    cand);
            if (r < nk) {
                nk = r;
            }
        }
        // Resume scanning from the earliest position that changed.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
}
/*
* 6/27/2020-12:26
* Is is good to use subsearch by every thread it self?
*/
inline void Searching::para_simple_search_subsearch(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
//        std::vector<uint8_t> &is_visited)
        boost::dynamic_bitset<> &is_visited)
{
    // Simple-search driver: initialize L candidates, then either run one
    // sequential subsearch (single thread) or partition set_L into
    // num_threads_ contiguous slices, subsearch each in parallel, and merge.
    // Results (up to K unique ids) are written to set_K.
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (unsigned i = 0; i < L; i++) {
            unsigned v_id = init_ids[i];
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[i] = Candidate(v_id, dist, false); // False means not checked.
        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        // No sort here: each path below sorts the range(s) it needs.
    }
    idi queue_end = L;
    // Searching
    if (num_threads_ == 1) { // Single threads
        // NOTE(review): sorts the whole vector, not just [0, L) — assumes
        // set_L.size() == L in this variant; confirm against the caller.
        std::sort(
                set_L.begin(),
                set_L.end());
        subsearch_for_simple_search(
                query_id,
                L,
                set_L,
                0,
                queue_end,
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
    } else { // Multiple threads
        const idi num_queues = num_threads_;
        // Ceiling division: slice length so num_queues slices cover L.
        const idi local_queue_length = (L - 1) / num_queues + 1;
        // Parallel for: each thread sorts and subsearches its own slice.
        #pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi local_queue_base = q_i * local_queue_length;
            if (local_queue_base >= L) {
                // This slice starts past the data (L not divisible evenly).
                continue;
            }
            idi local_queue_end = local_queue_length;
            if (local_queue_base + local_queue_end > L) {
                local_queue_end = L - local_queue_base; // Trim the last slice.
            }
            std::sort(
                    set_L.begin() + local_queue_base,
                    set_L.begin() + local_queue_base + local_queue_end);
            subsearch_for_simple_search(
                    query_id,
                    local_queue_end, // local_L
                    set_L,
                    local_queue_base, // base_set_L
                    local_queue_end, // set_L_end
                    is_visited,
                    tmp_count_computation);
        }
        count_distance_computation_ += tmp_count_computation;
        // Merge the sorted slices back into one sorted prefix of set_L.
        merge_in_set_L(
                set_L,
                L,
                num_queues,
                local_queue_length);
    }
    {// Return the results to set_K
        // How to deal with duplicate?
        // set_L is sorted; emit up to K unique ids, skipping consecutive
        // duplicates. NOTE(review): if duplicates exhaust L before K unique
        // ids are found, the tail of set_K keeps its previous contents.
        idi last_id = set_L[0].id_;
        set_K[0] = last_id;
        idi k_i = 1;
        idi l_i = 1;
        while (k_i < K && l_i < L) {
            if (last_id == set_L[l_i].id_) {
                ++l_i;
                continue;
            }
            last_id = set_L[l_i++].id_;
            set_K[k_i++] = last_id;
        }
    }
    {// Reset
        // Clear the visited bitmap for the next query.
        is_visited.reset();
    }
}
///*
// * 6/22/2020-09:38
// * A synchronized last element as the sentinel
// */
//inline void Searching::para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Local queues' ends
//// printf("query%u:iter: %u", query_id, tmp_count);
// idi total_elements = 0;
// for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) {
// total_elements += local_queues_ends[i_t];
// }
// number_local_elements_ += total_elements;
//// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]);
//// for (int i_t = 0; i_t < num_threads_; ++i_t) {
//// printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//// }
//// printf("\n");
// }
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/7/2020-16:55
// * Use 1 threads to scale M until the value_M_middle.
// * Then use multiple threads.
// * Except for Thread 0, other threads are collectors. They collect, but do not merge.
// * Only merge once after Thread 0 stops.
// */
//inline void Searching::para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi chunk_size;
// if (num_threads_ <= top_m_candidates_end) {
// chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1;
// } else {
// chunk_size = 1;
// }
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
//// {
//// if (c_i < chunk_size && tid != 0) {
//// printf("query_id: %u "
//// "tmp_count: %u "
//// "chunk_size: %u "
//// "c_i: %u "
//// "tid: %u\n",
//// query_id,
//// tmp_count,
//// chunk_size,
//// c_i,
//// tid);
//// }
//// }
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////// // Merge. Merge all queues in parallel.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//
//// // Merge only once after Master Thread stops.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/8/2020-16:39
// * Selecting rather than merging
// */
//inline void Searching::para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
//// while (k < L) {
// while (true) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// // Select M candidates
//// idi last_k = L;
////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
//// idi index_set_L = c_i + base_set_L;
//// if (set_L[index_set_L].is_checked_) {
//// continue;
//// }
//// last_k = c_i; // Record the location of the last candidate selected.
//// set_L[index_set_L].is_checked_ = true;
//// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//// }
//
// // Select M candidates
// {
// idi traverse_count = 0;
// idi bound_sub = L; // This is not always true!
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) {
// for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) {
// if (sub >= local_queues_ends[tid]) {
// continue;
// }
// idi index_set_L = tid * local_queue_length + sub;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
// }
//
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
//// idi r =
// add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
//// if (r < nk) {
//// nk = r;
//// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
//// idi r = merge_all_queues_queue_base(
//// set_L,
//// local_queues_ends,
//// queue_base,
//// real_threads,
//// local_queue_length,
//// L);
//// idi r =
// merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// if (r < nk) {
//// nk = r;
//// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
//// if (nk <= last_k) {
//// k = nk;
//// } else {
//// k = last_k + 1;
//// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//
////#pragma omp parallel for
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i + base_set_L].id_;
////// set_K[k_i] = set_L[k_i].id_;
//// }
//
// {
// idi k_i = 0;
// idi bound_sub = K / num_threads_;
// for (idi sub = 0; sub < bound_sub; ++sub) {
// for (int tid = 0; tid < num_threads_; ++tid) {
// idi index_set_L = tid * local_queue_length + sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// idi remain = K - k_i;
// if (remain) {
// for (int tid = 0; tid < remain; ++tid) {
// idi index_set_L = tid * local_queue_length + bound_sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
relu_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "relu_param.h"
/* Reference float32 ReLU / Leaky-ReLU kernel.
 *
 * Element-wise over an NCHW tensor:
 *   negative_slope == 0:  y = (x < 0) ? 0 : x        (plain ReLU)
 *   negative_slope != 0:  y = (x < 0) ? x * slope : x (leaky ReLU)
 * Channels are processed in parallel with OpenMP.
 *
 * NOTE(review): dims[0..3] are read unconditionally, so a 4-D (NCHW)
 * tensor is assumed -- confirm with callers.
 *
 * Always returns 0.
 */
static int ref_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
                         int num_thread)
{
    const int batch = input_tensor->dims[0];
    const int channels = input_tensor->dims[1];
    const int plane = input_tensor->dims[2] * input_tensor->dims[3]; /* h * w */
    const int batch_step = channels * plane;

    float* in = input_tensor->data;
    float* out = output_tensor->data;

    if (negative_slope == 0)
    {
        /* Plain ReLU: clamp negative values to zero. */
        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channels; c++)
            {
                const float* src = in + batch_step * n + plane * c;
                float* dst = out + batch_step * n + plane * c;

                for (int i = 0; i < plane; i++)
                    dst[i] = src[i] < 0 ? 0.f : src[i];
            }
        }
    }
    else
    {
        /* Leaky ReLU: scale negative values by negative_slope. */
        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channels; c++)
            {
                const float* src = in + batch_step * n + plane * c;
                float* dst = out + batch_step * n + plane * c;

                for (int i = 0; i < plane; i++)
                    dst[i] = src[i] < 0 ? src[i] * negative_slope : src[i];
            }
        }
    }

    return 0;
}
static int ref_relu_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
int batch = input_tensor->dims[0];
int channels = input_tensor->dims[1];
int h = input_tensor->dims[2];
int w = input_tensor->dims[3];
int size = h * w;
int c_step = h * w;
int batch_step = channels * c_step;
int total_size = batch * batch_step;
/* dequant */
uint8_t* input_uint8 = input_tensor->data;
uint8_t* output_uint8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
float* data_fp32 = (float*)sys_malloc(total_size * sizeof(float));
for(int i=0; i<total_size; i++)
{
data_fp32[i] = (input_uint8[i] - input_zero) * input_scale;
}
/* process */
if (negative_slope == 0)
{
for (int n = 0; n < batch; n++)
{
//#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = data_fp32 + batch_step * n + c_step * q;
float* dst = data_fp32 + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = 0;
else
dst[i] = src[i];
}
}
}
}
else
{
for (int n = 0; n < batch; n++)
{
//#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = data_fp32 + batch_step * n + c_step * q;
float* dst = data_fp32 + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = src[i] * negative_slope;
else
dst[i] = src[i];
}
}
}
}
/* quant */
for(int i=0; i<total_size; i++)
{
int udata = round(data_fp32[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(data_fp32);
return 0;
}
/* Per-node initialization hook: the reference ReLU keeps no per-node
 * state, so there is nothing to set up.  Always returns 0 (success). */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Per-node teardown hook: nothing was allocated in init_node, so there
 * is nothing to release.  Always returns 0 (success). */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Execute the ReLU node: fetch the node's input/output tensors and its
 * negative_slope parameter, then dispatch to the fp32 or uint8 reference
 * kernel based on the input data type.  Returns the kernel's result. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* graph = node->graph;
    struct ir_tensor* input = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct ir_tensor* output = get_ir_graph_tensor(graph, node->output_tensors[0]);
    struct relu_param* param = ( struct relu_param* )node->op.param_mem;

    if (input->data_type == TENGINE_DT_FP32)
        return ref_relu_fp32(input, output, param->negative_slope, exec_graph->num_thread);

    /* Any non-FP32 tensor falls through to the uint8 kernel, matching the
     * original dispatch. */
    return ref_relu_uint8(input, output, param->negative_slope, exec_graph->num_thread);
}
/* Infer the output shape: ReLU is shape-preserving, so the output tensor
 * simply mirrors the input tensor's dimensions.  Returns the result of
 * set_ir_tensor_shape(). */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_graph* graph = exec_node->ir_node->graph;
    struct ir_tensor* in = get_ir_graph_tensor(graph, exec_node->ir_node->input_tensors[0]);
    struct ir_tensor* out = get_ir_graph_tensor(graph, exec_node->ir_node->output_tensors[0]);

    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Backend-selection score: this reference implementation can always run
 * the node, but only at baseline priority (OPS_SCORE_CANDO), so an
 * optimized implementation is preferred when one is registered. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}
/* Operator dispatch table for the reference ReLU: no prerun/postrun work
 * is needed, so only run/reshape/init/release/score are wired up. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Module hook: register this dispatch table as the builtin OP_RELU
 * implementation.  Returns the registration result. */
static int reg_relu_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
/* Module hook: remove this dispatch table from the builtin OP_RELU
 * registry on unload.  Returns the unregistration result. */
static int unreg_relu_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_relu_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu_hcl_ops);
|
GB_unop__bnot_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__bnot_int8_int8
// op(A') function: GB_unop_tran__bnot_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the BNOT unary operator entry-wise: Cx [p] = ~(Ax [p]) for all
// anz entries.  Cx and Ax may be aliased.  Work is sliced statically
// across nthreads OpenMP threads.  Returns GrB_SUCCESS, or GrB_NO_VALUE
// when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__bnot_int8_int8
(
    int8_t *Cx,             // Cx and Ax may be aliased
    const int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // one flat pass over the entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // bitwise complement; the assignment truncates back to int8_t,
        // identical to the original aij/z expansion
        Cx [p] = (int8_t) ~(Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply BNOT to each entry.  The
// actual loop lives in the GB_unop_transpose.c template, which expands
// using the GB_* macros defined above in this file.  Returns
// GrB_SUCCESS, or GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_unop_tran__bnot_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,   // partition of A across naslice tasks
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the two-phase transpose: Rowcounts are already computed
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_int8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_int16
// op(A') function: GB_tran__lnot_int8_int16
// C type: int8_t
// A type: int16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT unary operator entry-wise with typecast:
// Cx [p] = !((int8_t) Ax [p] != 0) for all anz entries.  Work is sliced
// statically across nthreads OpenMP threads.  Returns GrB_SUCCESS, or
// GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_int8_int16
(
    int8_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // cast first (int16 -> int8), then test, exactly as GB_CAST_OP
        // expands (GB_CASTING followed by GB_OP); the order matters:
        // e.g. 256 casts to 0, so its logical-not is 1, not 0
        int8_t t = (int8_t) Ax [k] ;
        Cx [k] = !(t != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply LNOT (with int16 -> int8
// cast) to each entry.  The actual loop lives in the
// GB_unaryop_transpose.c template, which expands using the GB_* macros
// defined above in this file.  Returns GrB_SUCCESS, or GrB_NO_VALUE
// when compiled out via GB_DISABLE.
GrB_Info GB_tran__lnot_int8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,   // partition of A across naslice tasks
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the two-phase transpose: Rowcounts are already computed
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% John Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
  size_t
    id;                   /* unique view id, from AcquireWandId() */

  char
    name[MaxTextExtent],  /* "WandView-<id>", set via FormatLocaleString */
    *description;         /* user-settable description (ConstantString) */

  RectangleInfo
    extent;               /* region of the image the view iterates over */

  MagickWand
    *wand;                /* the wand this view was created from */

  CacheView
    *view;                /* pixel cache view onto wand->images */

  size_t
    number_threads;       /* number of rows in the pixel_wands table */

  PixelWand
    ***pixel_wands;       /* pixel_wands[thread_id][x]: per-thread row of
                             pixel wands, extent.width entries each */

  ExceptionInfo
    *exception;           /* exceptions raised while using the view */

  MagickBooleanType
    debug;                /* when MagickTrue, log wand events */

  size_t
    signature;            /* WandSignature while the view is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  /*
    Deep-copy the wand view: duplicate the cache view, exception, and the
    per-thread pixel wand table.  The clone references the same MagickWand
    as the original.  Throws a fatal exception on allocation failure.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  /*
    Bug fix: the clone must reference the source wand; the original code
    left clone_view->wand NULL, so any iterator on the clone dereferenced
    a NULL pointer via clone_view->wand->images.
  */
  clone_view->wand=wand_view->wand;
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Bug fix: allocate the per-thread pixel wand table before cloning rows
    into it; the original wrote through a NULL clone_view->pixel_wands.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    wand_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view->pixel_wands,0,
    wand_view->number_threads*sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    n;

  /*
    Release each thread's row of pixel wands (rows may be NULL when the
    table was only partially built), then release the table itself.
    Always returns NULL so callers can clear their pointer in one step.
  */
  assert(pixel_wands != (PixelWand ***) NULL);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    if (pixel_wands[n] == (PixelWand **) NULL)
      continue;
    pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  }
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  /*
    Tear down a wand view in the reverse order of construction: pixel wand
    table, cache view, description, exception, then the structure itself.
    Returns NULL so the caller can clear its pointer.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  /*
    Bug fix: the description string (allocated with ConstantString in
    NewWandView/SetWandViewDescription) was previously leaked.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  /* Invalidate the signature to catch use-after-destroy. */
  wand_view->signature=(~WandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    Walk the three views row by row (rows parallelized with OpenMP when
    available): load the source, duplex, and destination rows into this
    thread's pixel wands, invoke the user transfer callback, then write
    the (possibly modified) destination wands back to the pixel cache.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  /* NOTE(review): duplex and destination are dereferenced below without a
     NULL/signature check — confirm callers always supply valid views. */
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* Once any row fails, skip the remaining rows but keep the loop
       structure intact for OpenMP. */
    if (status == MagickFalse)
      continue;
    /*
      Load the source row into this thread's source pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images carry the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    /* PseudoClass images carry the colormap index in the index queue. */
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Load the duplex row into this thread's duplex pixel wands.
    */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelBlack(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /*
      Acquire the destination row for writing and seed this thread's
      destination pixel wands with its current contents.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* Hand all three rows to the user callback. */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Copy the (possibly updated) destination wands back into the
      authentic pixel row and sync it to the cache.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): the sync failed on destination->view, yet the
           exception is fetched from source->view — looks like a copy/paste
           slip; confirm against upstream. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  const ExceptionInfo
    *exception;

  /*
    Return the severity through *severity and a freshly allocated string of
    the form "reason (description)" for the most recent exception raised by
    the wand view.  The caller owns the returned string.
  */
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  exception=wand_view->exception;
  *severity=exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      exception->severity,exception->reason),MaxTextExtent);
  if (exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        exception->severity,exception->description),MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  /*
    Return (by value) the rectangle this view iterates over.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    Walk the source view row by row (parallelized with OpenMP when
    available), loading each row's pixels into this thread's pixel wands
    and handing them to the user's get callback.  Nothing is synced back:
    iteration is read-only.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* Once any row fails, skip the rest but keep the loop OpenMP-safe. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Load the row into this thread's pixel wands. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images carry the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    /* PseudoClass images carry the colormap index in the index queue. */
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_GetWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /*
    Return the calling thread's row of pixel wands.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[GetOpenMPThreadId()]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  /*
    Return the magick wand this view was created from.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  /*
    Verify the pointer refers to a live WandView: non-NULL, carrying the
    wand signature, and named with the "WandView" prefix.
  */
  if ((wand_view == (const WandView *) NULL) ||
      (wand_view->signature != WandSignature))
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    n;

  /*
    Build a zeroed table with one row of number_wands pixel wands per
    worker thread.  On any failure, release whatever was acquired and
    return NULL (DestroyPixelsThreadSet tolerates NULL rows).
  */
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  /*
    Allocate and zero the view; allocation failure throws a fatal wand
    exception rather than returning NULL.
  */
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* The wand must be assigned before the cache view is acquired below. */
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  /* Default extent: the full canvas of the wand's current image. */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  /* One row of pixel wands per worker thread, extent.width wands each. */
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  /*
    Create a wand view restricted to the given extent rather than the full
    canvas.  Throws a fatal wand exception on allocation failure.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: assign the wand before acquiring the cache view; the original
    dereferenced wand_view->wand->images while wand_view->wand was still
    NULL (the structure had just been zeroed).
  */
  wand_view->wand=wand;
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  /* One row of pixel wands per worker thread, width wands each. */
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  /*
    Replace the view's description with a copy of the given string.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  /* Bug fix: release the previous description to avoid leaking it. */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    Walk the destination view row by row (parallelized with OpenMP when
    available): the user's set callback populates this thread's pixel
    wands (their contents are NOT pre-loaded from the image), and the
    wand values are then written into the row and synced to the cache.
  */
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Writing per-pixel colors requires DirectClass storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (destination->extent.height-destination->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* Once any row fails, skip the rest but keep the loop OpenMP-safe. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Let the callback fill this thread's pixel wands for row y. */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand values into the authentic pixel row. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    /* CMYK images store the black channel in the index queue. */
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetWandViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewThreads() sets the number of threads in a thread team.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewThreads(WandView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  /*
    Set the number of threads in the view's thread team, capped at the
    thread resource limit.
  */
  assert(image_view != (WandView *) NULL);
  /*
    Bug fix: every other method in this file checks against WandSignature
    (the value assigned by NewWandView); MagickSignature was inconsistent.
  */
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    Walk the source and destination views row by row (parallelized with
    OpenMP when available): load each row into this thread's pixel wands,
    invoke the user transfer callback, then write the destination wands
    back to the pixel cache.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* Once any row fails, skip the rest but keep the loop OpenMP-safe. */
    if (status == MagickFalse)
      continue;
    /*
      Load the source row into this thread's source pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Acquire the destination row for writing and seed this thread's
      destination pixel wands with its current contents.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      Bug fix: seed the destination wands from the destination's own row
      (destination_pixels/destination_indexes).  The original read the
      SOURCE row here over the DESTINATION width — an out-of-bounds read
      whenever the destination is wider than the source — and disagreed
      with DuplexTransferWandViewIterator().
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* Hand both rows to the user callback. */
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Copy the (possibly updated) destination wands back into the
      authentic pixel row and sync it to the cache.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: report the exception from the destination view that
          failed to sync; the original queried source->view.
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  UpdateWandViewIterator: walk the wand view one scanline at a time (in
  parallel when OpenMP support is compiled in) and invoke the caller-supplied
  update() callback for each row.  Pixel changes made by the callback are
  written back to the image via the cache view.  Returns MagickTrue unless
  any row fails, in which case MagickFalse is returned and the exception is
  inherited onto the source wand's exception.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
UpdateWandViewMethod update,void *context)
{
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
/* Validate the view and the callback before touching any pixels. */
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdateWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* Pixels will be modified, so the image must be promoted to DirectClass. */
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
  magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
/* One set of pixel wands per OpenMP thread (indexed by thread id). */
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict pixels;
/* A previous row (possibly on another thread) failed; skip cheaply. */
if (status == MagickFalse)
continue;
/* Acquire this scanline for read/write access. */
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
/* Load the scanline into this thread's pixel wands for the callback. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
/* CMYK stores the black channel in the index queue. */
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
/* Copy the (possibly modified) wand values back into the scanline. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
SetPixelBlack(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
/* Persist the updated scanline to the pixel cache. */
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
/* Report progress; the critical section serializes progress++ updates. */
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
_cutils.c | /* Generated by Cython 0.29.21 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"extra_compile_args": [
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"name": "skbio.stats.ordination._cutils",
"sources": [
"skbio/stats/ordination/_cutils.pyx"
]
},
"module_name": "skbio.stats.ordination._cutils"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_21"
#define CYTHON_HEX_VERSION 0x001D15F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
/* Py_tss_* emulation for CPython < 3.7, built on the legacy PyThread_*_key
   API (enabled by the surrounding #if guard).  Creates the TLS key in place.
   NOTE(review): the legacy PyThread_create_key() gives no reliable error
   signal here, so this shim always reports success (0) -- confirm callers
   tolerate a key that failed to create. */
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
/* Heap-allocate a Py_tss_t key in the not-yet-created state.  Mirrors
   CPython's own PyThread_tss_alloc(): returns NULL on allocation failure
   instead of dereferencing the failed allocation (the original wrote
   *key unconditionally, a NULL-pointer dereference when PyObject_Malloc
   fails). */
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
if (key == NULL)
return NULL;
*key = Py_tss_NEEDS_INIT;
return key;
}
/* Release a key obtained from PyThread_tss_alloc(). */
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
/* A key counts as "created" once it no longer holds the sentinel value. */
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
/* Destroy the underlying TLS key and reset the sentinel so the key can be
   re-created later. */
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
/* Store this thread's value; forwards the legacy API's status code. */
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
/* Fetch this thread's value (NULL when unset). */
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
/* Fallback NaN constructor for platforms whose <math.h> lacks NAN: an
   all-ones bit pattern is a quiet NaN for IEEE-754 single precision. */
static CYTHON_INLINE float __PYX_NAN() {
  float nan_value;
  unsigned char *raw = (unsigned char *) &nan_value;
  size_t i;
  for (i = 0; i < sizeof nan_value; i++)
    raw[i] = 0xFFu;
  return nan_value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__skbio__stats__ordination___cutils
#define __PYX_HAVE_API__skbio__stats__ordination___cutils
/* Early includes */
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
/* Bounds check for sequence indexing.  The unsigned comparison folds the
   "i >= 0" and "i < limit" tests into a single compare (a negative i wraps
   to a huge unsigned value and fails the test). */
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    if ((size_t) i < (size_t) limit)
        return 1;
    return 0;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
/* strlen() analogue for Py_UNICODE buffers: count code units up to, but not
   including, the terminating zero. */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
    size_t length = 0;
    while (u[length] != 0)
        length++;
    return length;
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
/* Set when sys.getdefaultencoding() is something other than "ascii". */
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Module-init helper (c_string_encoding=ascii builds only): query Python's
   default string encoding and verify it is a superset of ASCII.  Returns 0
   on success, -1 with a Python exception set on failure.  Uses the standard
   goto-free cleanup pattern via Py_XDECREF at "bad". */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
/* Round-trip all 128 ASCII code points through the default encoding to
   prove it encodes ASCII byte-identically. */
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/*
 * Cache sys.getdefaultencoding() into the process-lifetime C string
 * __PYX_DEFAULT_STRING_ENCODING (heap-allocated, never freed: it lives
 * as long as the module).  Compiled only when the module must decode
 * char* data with the runtime default encoding (see enclosing #if).
 * Returns 0 on success, -1 with a Python exception set on failure
 * (on malloc failure -1 is returned without a new exception).
 */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject *sys_module = PyImport_ImportModule("sys");
    PyObject *enc_obj = NULL;
    char *enc_name;
    size_t enc_len;
    if (!sys_module)
        goto fail;
    enc_obj = PyObject_CallMethod(sys_module, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys_module);
    if (!enc_obj)
        goto fail;
    /* Borrowed pointer into enc_obj's buffer; copy before releasing it. */
    enc_name = PyBytes_AsString(enc_obj);
    if (!enc_name)
        goto fail;
    enc_len = strlen(enc_name);
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(enc_len + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING)
        goto fail;
    memcpy(__PYX_DEFAULT_STRING_ENCODING, enc_name, enc_len + 1);
    Py_DECREF(enc_obj);
    return 0;
fail:
    Py_XDECREF(enc_obj);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
/* No-op marker: silences "may be used uninitialized" warnings by making the compiler believe the pointed-to variable was touched. */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"skbio/stats/ordination/_cutils.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
/* C-level view of a typed memory slice: the owning memoryview object plus
 * per-dimension layout arrays (fixed capacity of 8 dimensions). */
typedef struct {
  struct __pyx_memoryview_obj *memview; /* owning object; holds the buffer alive */
  char *data;                           /* base pointer of the slice's data */
  Py_ssize_t shape[8];                  /* extent per dimension */
  Py_ssize_t strides[8];                /* byte stride per dimension */
  Py_ssize_t suboffsets[8];             /* PEP 3118 indirection offsets (negative = direct) */
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Static description of one buffer element type, used when validating a
 * Py_buffer's format string against the declared memoryview dtype. */
typedef struct {
  const char* name;                    /* human-readable type name for error messages */
  struct __Pyx_StructField_* fields;   /* member list when the element is a struct dtype */
  size_t size;                         /* element size in bytes */
  size_t arraysize[8];                 /* dimensions for fixed-size array members */
  int ndim;
  char typegroup;                      /* single-char type category code */
  char is_unsigned;
  int flags;                           /* e.g. __PYX_BUF_FLAGS_PACKED_STRUCT */
} __Pyx_TypeInfo;
/* One named member of a struct dtype: its type, name, and byte offset. */
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
/* Stack frame used while recursing into nested struct dtypes during
 * buffer-format parsing. */
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
/* Parser state for matching a PEP 3118 buffer format string against the
 * expected __Pyx_TypeInfo. */
typedef struct {
  __Pyx_StructField root;          /* synthetic root field for the whole element */
  __Pyx_BufFmt_StackElem* head;    /* top of the nested-struct stack */
  size_t fmt_offset;               /* byte offset reached in the format so far */
  size_t new_count, enc_count;     /* pending vs. committed repeat counts */
  size_t struct_alignment;
  int is_complex;
  char enc_type;                   /* last committed format character */
  char new_packmode;               /* pending packing mode ('@', '=', ...) */
  char enc_packmode;               /* committed packing mode */
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
/* Instance layout for View.MemoryView's `array` extension type: a
 * self-owned contiguous buffer exposed through the buffer protocol. */
struct __pyx_array_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_array *__pyx_vtab; /* C-method vtable */
  char *data;                 /* the allocated buffer */
  Py_ssize_t len;             /* total size in bytes */
  char *format;               /* buffer format string (char* view of _format) */
  int ndim;
  Py_ssize_t *_shape;         /* per-dimension extents */
  Py_ssize_t *_strides;       /* per-dimension byte strides */
  Py_ssize_t itemsize;
  PyObject *mode;             /* 'c' or 'fortran' contiguity mode object */
  PyObject *_format;          /* owns the bytes backing `format` */
  void (*callback_free_data)(void *); /* optional custom deallocator for data */
  int free_data;              /* nonzero: free data on dealloc */
  int dtype_is_object;        /* elements are PyObject* and need refcounting */
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
/* Instance layout for View.MemoryView's `Enum` sentinel type; `name` is
 * the display string used by its repr. */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* Instance layout for View.MemoryView's `memoryview` extension type:
 * wraps a Py_buffer acquired from `obj` and tracks slice acquisitions. */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab; /* C-method vtable */
  PyObject *obj;              /* object the buffer was acquired from */
  PyObject *_size;            /* cached size object */
  PyObject *_array_interface; /* cached __array_interface__ dict */
  PyThread_type_lock lock;    /* protects the acquisition count when atomics are unavailable */
  /* Two counters so an aligned one can be selected via the *_aligned_p
   * pointer for the atomic inc/dec macros. */
  __pyx_atomic_int acquisition_count[2];
  __pyx_atomic_int *acquisition_count_aligned_p;
  Py_buffer view;             /* the underlying PEP 3118 buffer */
  int flags;                  /* buffer-request flags */
  int dtype_is_object;        /* elements are PyObject* and need refcounting */
  __Pyx_TypeInfo *typeinfo;   /* expected element type for format validation */
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* Instance layout for `_memoryviewslice` (subclass of memoryview):
 * carries the C slice plus conversion callbacks for its element type. */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;   /* base class state */
  __Pyx_memviewslice from_slice;            /* the C-level slice being exposed */
  PyObject *from_object;                    /* object the slice was taken from */
  PyObject *(*to_object_func)(char *);      /* element -> Python object */
  int (*to_dtype_func)(char *, PyObject *); /* Python object -> element */
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
/* C-method vtable for the `array` type (one cdef method). */
struct __pyx_vtabstruct_array {
  PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* C-method vtable for the `memoryview` type: indexing, slice assignment,
 * and element <-> object conversion hooks. */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* Vtable for `_memoryviewslice`; inherits all entries from memoryview. */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
/* Function table imported from the refnanny debug module; each callback
 * receives the per-call context, the object, and the source line number. */
typedef struct {
  void (*INCREF)(void*, PyObject*, int);
  void (*DECREF)(void*, PyObject*, int);
  void (*GOTREF)(void*, PyObject*, int);
  void (*GIVEREF)(void*, PyObject*, int);
  void* (*SetupContext)(const char*, int, const char*); /* (name, line, file) -> context */
  void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* PyDictContains.proto */
/* Membership test for `in` / `not in` on a dict.  `eq` is Py_EQ for `in`
 * and Py_NE for `not in`.  Returns 1/0 for the truth value, or the
 * negative error code propagated from PyDict_Contains. */
static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) {
    int found = PyDict_Contains(dict, item);
    if (unlikely(found < 0))
        return found;
    return (found == (eq == Py_EQ));
}
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
#define __Pyx_PyObject_Dict_GetItem(obj, name)\
(likely(PyDict_CheckExact(obj)) ?\
__Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* UnicodeAsUCS4.proto */
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*);
/* object_ord.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyObject_Ord(c)\
(likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c))
#else
#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c)
#endif
static long __Pyx__PyObject_Ord(PyObject* c);
/* SetItemInt.proto */
#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
__Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
int is_list, int wraparound, int boundscheck);
/* IterFinish.proto */
static CYTHON_INLINE int __Pyx_IterFinish(void);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyObjectGetMethod.proto */
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
/* PyObjectCallMethod0.proto */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* UnpackItemEndCheck.proto */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* UnpackTupleError.proto */
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index);
/* UnpackTuple2.proto */
#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple)\
(likely(is_tuple || PyTuple_Check(tuple)) ?\
(likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ?\
__Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) :\
(__Pyx_UnpackTupleError(tuple, 2), -1)) :\
__Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple))
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple);
static int __Pyx_unpack_tuple2_generic(
PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple);
/* dict_iter.proto */
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
Py_ssize_t* p_orig_length, int* p_is_dict);
static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos,
PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict);
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Fast list.append(): pokes the item straight into the list's backing
 * array when spare capacity exists, otherwise falls back to
 * PyList_Append.  Compiled only when CPython list internals are safe to
 * touch (see enclosing #if).  Returns 0 on success, -1 on error. */
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    /* Fast path only when there is room AND the list is more than half
     * full; bitwise & (not &&) is deliberate — both operands are
     * side-effect free, so this evaluates branchlessly. */
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);                    /* list takes its own reference */
        PyList_SET_ITEM(list, len, x);
        __Pyx_SET_SIZE(list, len + 1);
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
/* Decode UTF-16 bytes with the byte order auto-detected from a BOM
 * (byteorder hint 0, per the PyUnicode_DecodeUTF16 API). */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
    int order_hint = 0;
    return PyUnicode_DecodeUTF16(s, size, errors, &order_hint);
}
/* Decode UTF-16 bytes as little-endian (byteorder hint -1). */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
    int order_hint = -1;
    return PyUnicode_DecodeUTF16(s, size, errors, &order_hint);
}
/* Decode UTF-16 bytes as big-endian (byteorder hint 1). */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
    int order_hint = 1;
    return PyUnicode_DecodeUTF16(s, size, errors, &order_hint);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Append used inside list comprehensions: like __Pyx_PyList_Append but
 * the fast path only checks spare capacity (no half-full heuristic),
 * since comprehension lists grow monotonically.  Compiled only when
 * CPython list internals are available (see enclosing #if).
 * Returns 0 on success, -1 on error. */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);                    /* list takes its own reference */
        PyList_SET_ITEM(list, len, x);
        __Pyx_SET_SIZE(list, len + 1);
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
/* list.extend(L, v) via the fastest route available.
 * Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    /* _PyList_Extend returns a new reference to None on success. */
    PyObject* none = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!none))
        return -1;
    Py_DECREF(none);
    return 0;
#else
    /* Portable fallback: assigning to the empty slice at the end of the
     * list appends all items of v. */
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* FetchCommonType.proto */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
/* CythonFunctionShared.proto */
#define __Pyx_CyFunction_USED 1
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
#define __Pyx_CYFUNCTION_CCLASS 0x04
#define __Pyx_CyFunction_GetClosure(f)\
(((__pyx_CyFunctionObject *) (f))->func_closure)
#define __Pyx_CyFunction_GetClassObj(f)\
(((__pyx_CyFunctionObject *) (f))->func_classobj)
#define __Pyx_CyFunction_Defaults(type, f)\
((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
typedef struct {
PyCFunctionObject func;
#if PY_VERSION_HEX < 0x030500A0
PyObject *func_weakreflist;
#endif
PyObject *func_dict;
PyObject *func_name;
PyObject *func_qualname;
PyObject *func_doc;
PyObject *func_globals;
PyObject *func_code;
PyObject *func_closure;
PyObject *func_classobj;
void *defaults;
int defaults_pyobjects;
size_t defaults_size; // used by FusedFunction for copying defaults
int flags;
PyObject *defaults_tuple;
PyObject *defaults_kwdict;
PyObject *(*defaults_getter)(PyObject *);
PyObject *func_annotations;
} __pyx_CyFunctionObject;
static PyTypeObject *__pyx_CyFunctionType = 0;
#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType))
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *self,
PyObject *module, PyObject *globals,
PyObject* code);
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
size_t size,
int pyobjects);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
PyObject *dict);
static int __pyx_CyFunction_init(void);
/* FusedFunction.proto */
typedef struct {
__pyx_CyFunctionObject func;
PyObject *__signatures__;
PyObject *type;
PyObject *self;
} __pyx_FusedFunctionObject;
static PyObject *__pyx_FusedFunction_New(PyMethodDef *ml, int flags,
PyObject *qualname, PyObject *closure,
PyObject *module, PyObject *globals,
PyObject *code);
static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self);
static PyTypeObject *__pyx_FusedFunctionType = NULL;
static int __pyx_FusedFunction_init(void);
#define __Pyx_FusedFunction_USED
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj);
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* BytesContains.proto */
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character);
/* ImportNumPyArray.proto */
static PyObject *__pyx_numpy_ndarray = NULL;
static PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'skbio.stats.ordination._cutils' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "skbio.stats.ordination._cutils"
extern int __pyx_module_is_main_skbio__stats__ordination___cutils;
int __pyx_module_is_main_skbio__stats__ordination___cutils = 0;
/* Implementation of 'skbio.stats.ordination._cutils' */
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_[] = "()";
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_s[] = "s";
static const char __pyx_k__2[] = "|";
static const char __pyx_k_d2[] = "d2";
static const char __pyx_k_d3[] = "d3";
static const char __pyx_k_d4[] = "d4";
static const char __pyx_k_d5[] = "d5";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_col[] = "col";
static const char __pyx_k_el0[] = "el0";
static const char __pyx_k_mat[] = "mat";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_row[] = "row";
static const char __pyx_k_args[] = "args";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_kind[] = "kind";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_tcol[] = "tcol";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_trow[] = "trow";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_float[] = "float";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_split[] = "split";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_strip[] = "strip";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_double[] = "double";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_kwargs[] = "kwargs";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_float32[] = "float32";
static const char __pyx_k_float64[] = "float64";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_gr_mean[] = "gr_mean";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_row_sum[] = "row_sum";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_centered[] = "centered";
static const char __pyx_k_defaults[] = "defaults";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_tcol_max[] = "tcol_max";
static const char __pyx_k_trow_max[] = "trow_max";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_n_samples[] = "n_samples";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_row_means[] = "row_means";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_dtype_real[] = "dtype_real";
static const char __pyx_k_global_sum[] = "global_sum";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_signatures[] = "signatures";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_global_mean[] = "global_mean";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_row_means_np[] = "row_means_np";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_e_matrix_means_cy[] = "e_matrix_means_cy";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_f_matrix_inplace_cy[] = "f_matrix_inplace_cy";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_center_distance_matrix_cy[] = "center_distance_matrix_cy";
static const char __pyx_k_No_matching_signature_found[] = "No matching signature found";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_skbio_stats_ordination__cutils[] = "skbio.stats.ordination._cutils";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Expected_at_least_d_argument_s_g[] = "Expected at least %d argument%s, got %d";
static const char __pyx_k_Function_call_with_ambiguous_arg[] = "Function call with ambiguous argument types";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_skbio_stats_ordination__cutils_p[] = "skbio/stats/ordination/_cutils.pyx";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_kp_s_;
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Expected_at_least_d_argument_s_g;
static PyObject *__pyx_kp_s_Function_call_with_ambiguous_arg;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_kp_s_No_matching_signature_found;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_kp_s__2;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_args;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_center_distance_matrix_cy;
static PyObject *__pyx_n_s_centered;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_col;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_d2;
static PyObject *__pyx_n_s_d3;
static PyObject *__pyx_n_s_d4;
static PyObject *__pyx_n_s_d5;
static PyObject *__pyx_n_s_defaults;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_double;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_dtype_real;
static PyObject *__pyx_n_s_e_matrix_means_cy;
static PyObject *__pyx_n_s_el0;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_f_matrix_inplace_cy;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_float;
static PyObject *__pyx_n_s_float32;
static PyObject *__pyx_n_s_float64;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_global_mean;
static PyObject *__pyx_n_s_global_sum;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_gr_mean;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_kind;
static PyObject *__pyx_n_s_kwargs;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_mat;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_n_samples;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_row;
static PyObject *__pyx_n_s_row_means;
static PyObject *__pyx_n_s_row_means_np;
static PyObject *__pyx_n_s_row_sum;
static PyObject *__pyx_n_s_s;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_signatures;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_skbio_stats_ordination__cutils;
static PyObject *__pyx_kp_s_skbio_stats_ordination__cutils_p;
static PyObject *__pyx_n_s_split;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_strip;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_tcol;
static PyObject *__pyx_n_s_tcol_max;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_trow;
static PyObject *__pyx_n_s_trow_max;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_6e_matrix_means_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered, __Pyx_memviewslice __pyx_v_row_means); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_8e_matrix_means_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered, __Pyx_memviewslice __pyx_v_row_means); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_12f_matrix_inplace_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_row_means, float __pyx_v_global_mean, __Pyx_memviewslice __pyx_v_centered); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_14f_matrix_inplace_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_row_means, double __pyx_v_global_mean, __Pyx_memviewslice __pyx_v_centered); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_18center_distance_matrix_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered); /* proto */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_20center_distance_matrix_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_2;
static PyObject *__pyx_int_3;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__19;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_tuple__32;
static PyObject *__pyx_tuple__33;
static PyObject *__pyx_tuple__34;
static PyObject *__pyx_codeobj__24;
static PyObject *__pyx_codeobj__26;
static PyObject *__pyx_codeobj__28;
static PyObject *__pyx_codeobj__35;
/* Late includes */
/* "skbio/stats/ordination/_cutils.pyx":23
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def e_matrix_means_cy(TReal[:, ::1] mat, TReal[:, ::1] centered, TReal[::1] row_means): # <<<<<<<<<<<<<<
* """
* Compute E matrix from a distance matrix, and
*/
/* Python wrapper */
static PyObject *__pyx_pw_5skbio_5stats_10ordination_7_cutils_1e_matrix_means_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy[] = "\n Compute E matrix from a distance matrix, and \n also compute the means in the process.\n\n Squares and divides by -2 the input elementwise. Eq. 9.20 in\n Legendre & Legendre 1998.\n\n\n Parameters\n ----------\n mat : 2D array_like\n Distance matrix.\n centered : 2D array_like\n Output, E matrix. Must be pre-allocated and same shape as mat.\n Can point to mat (i.e. in-place)\n row_means : 1D_array_like\n Output, Mean values of each row in `centered`\n Returns\n -------\n global_mean : real\n Global mean value\n ";
static PyMethodDef __pyx_mdef_5skbio_5stats_10ordination_7_cutils_1e_matrix_means_cy = {"e_matrix_means_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5skbio_5stats_10ordination_7_cutils_1e_matrix_means_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy};
/* Cython-generated "__pyx_fused_cpdef" wrapper: the Python-visible entry
 * point for the fused-type function e_matrix_means_cy. It unpacks the
 * internal 4-tuple (signatures, args, kwargs, defaults) used by Cython's
 * fused-type dispatch machinery, then forwards to the dispatcher below,
 * which inspects the runtime argument types and picks a specialization.
 * NOTE(review): auto-generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pw_5skbio_5stats_10ordination_7_cutils_1e_matrix_means_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_signatures = 0;
PyObject *__pyx_v_args = 0;
PyObject *__pyx_v_kwargs = 0;
CYTHON_UNUSED PyObject *__pyx_v_defaults = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0};
PyObject* values[4] = {0,0,0,0};
/* Mixed positional/keyword parsing: the switch falls through so that
 * positional arguments fill values[] first, then keywords top up the rest. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Each required argument missing from the positionals must come from
 * kwargs; otherwise raise the standard "invalid argument tuple" error. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_args)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 1); __PYX_ERR(0, 23, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); __PYX_ERR(0, 23, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); __PYX_ERR(0, 23, __pyx_L3_error)
}
}
/* Leftover keywords here are unexpected names — reject them. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) __PYX_ERR(0, 23, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly 4 positional arguments, no keywords. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_signatures = values[0];
__pyx_v_args = values[1];
__pyx_v_kwargs = values[2];
__pyx_v_defaults = values[3];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Delegate to the dispatcher that selects the float/double specialization. */
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated fused-type dispatcher for e_matrix_means_cy.
 * Inspects the first actual argument ("mat") at runtime to decide whether
 * the TReal fused type should bind to float or double, matches that choice
 * against the available signature strings, and returns the corresponding
 * specialized function object from the `signatures` dict.
 * The heavy goto/label structure and paired INCREF/DECREF calls implement
 * CPython reference counting and error unwinding — statement order matters,
 * so this body must not be reorganized by hand.
 * NOTE(review): auto-generated code — edit the .pyx source, not this file. */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) {
PyObject *__pyx_v_dest_sig = NULL;
Py_ssize_t __pyx_v_i;
PyTypeObject *__pyx_v_ndarray = 0;
__Pyx_memviewslice __pyx_v_memslice;
Py_ssize_t __pyx_v_itemsize;
CYTHON_UNUSED int __pyx_v_dtype_signed;
char __pyx_v_kind;
PyObject *__pyx_v_arg = NULL;
PyObject *__pyx_v_dtype = NULL;
PyObject *__pyx_v_arg_base = NULL;
PyObject *__pyx_v_candidates = NULL;
PyObject *__pyx_v_sig = NULL;
int __pyx_v_match_found;
PyObject *__pyx_v_src_sig = NULL;
PyObject *__pyx_v_dst_type = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
long __pyx_t_7;
__Pyx_memviewslice __pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("e_matrix_means_cy", 0);
__Pyx_INCREF(__pyx_v_kwargs);
/* dest_sig = [None]: one slot per fused type (only TReal here), to be
 * filled with the resolved type name ("float" or "double"). */
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyList_SET_ITEM(__pyx_t_1, 0, Py_None);
__pyx_v_dest_sig = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* Normalize an empty kwargs dict to None so later checks are uniform. */
__pyx_t_3 = (__pyx_v_kwargs != Py_None);
__pyx_t_4 = (__pyx_t_3 != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_kwargs); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_t_3 = ((!__pyx_t_4) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
__Pyx_INCREF(Py_None);
__Pyx_DECREF_SET(__pyx_v_kwargs, Py_None);
}
/* Grab numpy.ndarray's type object if numpy is importable (may be None). */
__pyx_t_1 = ((PyObject *)__Pyx_ImportNumPyArrayTypeIfAvailable()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_ndarray = ((PyTypeObject*)__pyx_t_1);
__pyx_t_1 = 0;
__pyx_v_itemsize = -1L;
/* Locate the argument that determines TReal: first positional arg, or the
 * "mat" keyword; raise TypeError if neither is present. */
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_t_2 = ((0 < __pyx_t_5) != 0);
if (__pyx_t_2) {
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 0);
__Pyx_INCREF(__pyx_t_1);
__pyx_v_arg = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6;
}
__pyx_t_3 = (__pyx_v_kwargs != Py_None);
__pyx_t_4 = (__pyx_t_3 != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L7_bool_binop_done;
}
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_4 = (__Pyx_PyDict_ContainsTF(__pyx_n_s_mat, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_t_3 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L7_bool_binop_done:;
if (__pyx_t_2) {
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_mat); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_arg = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6;
}
/*else*/ {
/* No usable argument: build and raise
 * TypeError("Expected at least %d argument%s, got %d" % (3, 's', len(args))). */
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_t_1 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_int_3);
__Pyx_INCREF(__pyx_n_s_s);
__Pyx_GIVEREF(__pyx_n_s_s);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_n_s_s);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_argument_s_g, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_L6:;
/* Type-resolution "loop": runs once and breaks as soon as TReal is
 * decided. First try cheap dtype inspection, then actual memoryview
 * coercion as a fallback. */
while (1) {
__pyx_t_2 = (__pyx_v_ndarray != ((PyTypeObject*)Py_None));
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* arg is an ndarray: read its .dtype directly. */
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_dtype = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L12;
}
/* arg is a Cython memoryview: look at the dtype of its .base ndarray. */
__pyx_t_2 = __pyx_memoryview_check(__pyx_v_arg);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_arg_base = __pyx_t_6;
__pyx_t_6 = 0;
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_dtype = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L13;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L13:;
goto __pyx_L12;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L12:;
__pyx_v_itemsize = -1L;
__pyx_t_2 = (__pyx_v_dtype != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* Classify by dtype.itemsize and dtype.kind ('f' = floating point). */
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_itemsize = __pyx_t_5;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_Ord(__pyx_t_6); if (unlikely(__pyx_t_7 == ((long)(long)(Py_UCS4)-1))) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_kind = __pyx_t_7;
__pyx_v_dtype_signed = (__pyx_v_kind == 'i');
switch (__pyx_v_kind) {
case 'i':
case 'u':
break;
case 'f':
/* float match: itemsize == sizeof(float) and ndim == 2. */
__pyx_t_2 = (((sizeof(float)) == __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L16_bool_binop_done;
}
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 2) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L16_bool_binop_done:;
if (__pyx_t_3) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
goto __pyx_L10_break;
}
/* double match: itemsize == sizeof(double) and ndim == 2. */
__pyx_t_2 = (((sizeof(double)) == __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 2) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
if (__pyx_t_3) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
goto __pyx_L10_break;
}
break;
case 'c':
break;
case 'O':
break;
default: break;
}
}
/* dtype inspection inconclusive: try actually acquiring a 2-D C-contiguous
 * float memoryview on arg; success decides TReal = float. */
__pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L22_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_itemsize == (sizeof(float))) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L22_bool_binop_done:;
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(__pyx_v_arg, 0);
__pyx_v_memslice = __pyx_t_8;
__pyx_t_3 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_3) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
goto __pyx_L10_break;
}
/*else*/ {
/* Coercion failed — clear the error and try the next candidate type. */
PyErr_Clear();
}
}
/* Same fallback for double. */
__pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L26_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_itemsize == (sizeof(double))) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L26_bool_binop_done:;
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_arg, 0);
__pyx_v_memslice = __pyx_t_8;
__pyx_t_3 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_3) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
goto __pyx_L10_break;
}
/*else*/ {
PyErr_Clear();
}
}
/* Nothing matched: leave the slot as None so no signature will match. */
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
goto __pyx_L10_break;
}
__pyx_L10_break:;
/* Collect every registered signature string whose pipe-separated type list
 * agrees with dest_sig; candidates is the list of matching keys. */
__pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_candidates = ((PyObject*)__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_9), (&__pyx_t_10)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__pyx_t_6 = __pyx_t_1;
__pyx_t_1 = 0;
while (1) {
__pyx_t_11 = __Pyx_dict_iter_next(__pyx_t_6, __pyx_t_9, &__pyx_t_5, &__pyx_t_1, NULL, NULL, __pyx_t_10);
if (unlikely(__pyx_t_11 == 0)) break;
if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_v_match_found = 0;
/* src_sig = sig.strip('()').split('|') — the per-fused-type list. */
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_14 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
__pyx_t_14 = PyMethod_GET_SELF(__pyx_t_13);
if (likely(__pyx_t_14)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
__Pyx_INCREF(__pyx_t_14);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_13, function);
}
}
__pyx_t_12 = (__pyx_t_14) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_14, __pyx_kp_s_) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s_);
__Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_n_s_split); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
__pyx_t_12 = PyMethod_GET_SELF(__pyx_t_13);
if (likely(__pyx_t_12)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
__Pyx_INCREF(__pyx_t_12);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_13, function);
}
}
__pyx_t_1 = (__pyx_t_12) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_12, __pyx_kp_s__2) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s__2);
__Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_XDECREF_SET(__pyx_v_src_sig, __pyx_t_1);
__pyx_t_1 = 0;
/* Compare each resolved dest_sig slot against the signature's slot. */
__pyx_t_15 = PyList_GET_SIZE(__pyx_v_dest_sig); if (unlikely(__pyx_t_15 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_t_16 = __pyx_t_15;
for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
__pyx_v_i = __pyx_t_17;
__pyx_t_1 = PyList_GET_ITEM(__pyx_v_dest_sig, __pyx_v_i);
__Pyx_INCREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_3 = (__pyx_v_dst_type != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_src_sig, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_13 = PyObject_RichCompare(__pyx_t_1, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
if (__pyx_t_2) {
__pyx_v_match_found = 1;
goto __pyx_L34;
}
/*else*/ {
__pyx_v_match_found = 0;
goto __pyx_L32_break;
}
__pyx_L34:;
}
}
__pyx_L32_break:;
__pyx_t_2 = (__pyx_v_match_found != 0);
if (__pyx_t_2) {
__pyx_t_18 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_18 == ((int)-1))) __PYX_ERR(0, 23, __pyx_L1_error)
}
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* Exactly one candidate must remain: zero -> "no matching signature",
 * more than one -> ambiguity error; otherwise return signatures[candidate]. */
__pyx_t_2 = (PyList_GET_SIZE(__pyx_v_candidates) != 0);
__pyx_t_3 = ((!__pyx_t_2) != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_9 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_t_3 = ((__pyx_t_9 > 1) != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 23, __pyx_L1_error)
}
/*else*/ {
__Pyx_XDECREF(__pyx_r);
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 23, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_12);
__Pyx_XDECREF(__pyx_t_13);
__Pyx_XDECREF(__pyx_t_14);
__Pyx_AddTraceback("skbio.stats.ordination._cutils.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_dest_sig);
__Pyx_XDECREF(__pyx_v_ndarray);
__Pyx_XDECREF(__pyx_v_arg);
__Pyx_XDECREF(__pyx_v_dtype);
__Pyx_XDECREF(__pyx_v_arg_base);
__Pyx_XDECREF(__pyx_v_candidates);
__Pyx_XDECREF(__pyx_v_sig);
__Pyx_XDECREF(__pyx_v_src_sig);
__Pyx_XDECREF(__pyx_v_dst_type);
__Pyx_XDECREF(__pyx_v_kwargs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_7e_matrix_means_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_0__pyx_mdef_5skbio_5stats_10ordination_7_cutils_7e_matrix_means_cy = {"__pyx_fuse_0e_matrix_means_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_7e_matrix_means_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy};
/* Python wrapper for the float (fuse_0) specialization of e_matrix_means_cy.
 * Parses (mat, centered, row_means), coerces each to a writable
 * C-contiguous float memoryview slice (2-D for mat/centered, 1-D for
 * row_means), and forwards to the specialized implementation.
 * NOTE(review): auto-generated Cython code — edit the .pyx source instead. */
static PyObject *__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_7e_matrix_means_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_mat = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_centered = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_row_means = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("e_matrix_means_cy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_mat,&__pyx_n_s_centered,&__pyx_n_s_row_means,0};
PyObject* values[3] = {0,0,0};
/* Standard Cython argument unpacking: positionals fill values[] via the
 * fall-through switch, keywords supply any remaining required arguments. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mat)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_centered)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("e_matrix_means_cy", 1, 3, 3, 1); __PYX_ERR(0, 23, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row_means)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("e_matrix_means_cy", 1, 3, 3, 2); __PYX_ERR(0, 23, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "e_matrix_means_cy") < 0)) __PYX_ERR(0, 23, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Acquire writable float memoryview slices; failure raises here with the
 * coercion error already set. PyBUF_WRITABLE is required because both
 * `centered` and `row_means` are output buffers. */
__pyx_v_mat = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_mat.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_v_centered = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_centered.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_v_row_means = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row_means.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("e_matrix_means_cy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.e_matrix_means_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Call the float specialization with the acquired slices. */
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_6e_matrix_means_cy(__pyx_self, __pyx_v_mat, __pyx_v_centered, __pyx_v_row_means);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Cython-generated specialization (__pyx_fuse_0, TReal = float) of
 * skbio.stats.ordination._cutils.e_matrix_means_cy.
 *
 * For an n_samples x n_samples distance matrix `mat` it writes
 * centered[row,col] = -0.5 * mat[row,col]**2, stores each row mean into
 * row_means[row], and returns the global mean
 * (global_sum / n_samples) / n_samples as a Python float.
 * Row/column accumulation is done in long double; the outer row loop is an
 * OpenMP `prange(..., nogil=True)` with a `+` reduction on global_sum.
 *
 * NOTE(review): this file is machine-generated by Cython. Do not hand-edit
 * the logic here; change _cutils.pyx and regenerate instead.
 */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_6e_matrix_means_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered, __Pyx_memviewslice __pyx_v_row_means) {
Py_ssize_t __pyx_v_n_samples;
Py_ssize_t __pyx_v_d2;
Py_ssize_t __pyx_v_d3;
Py_ssize_t __pyx_v_d4;
Py_ssize_t __pyx_v_d5;
Py_ssize_t __pyx_v_row;
Py_ssize_t __pyx_v_col;
long double __pyx_v_row_sum;
float __pyx_v_el0;
long double __pyx_v_global_sum;
float __pyx_v_global_mean;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
long double __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_0e_matrix_means_cy", 0);
/* "skbio/stats/ordination/_cutils.pyx":46
 * Global mean value
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 */
__pyx_v_n_samples = (__pyx_v_mat.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":47
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]
 */
__pyx_v_d2 = (__pyx_v_mat.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":48
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d4 = centered.shape[1]
 * cdef Py_ssize_t d5 = row_means.shape[0]
 */
__pyx_v_d3 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":49
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d5 = row_means.shape[0]
 *
 */
__pyx_v_d4 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":50
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]
 * cdef Py_ssize_t d5 = row_means.shape[0] # <<<<<<<<<<<<<<
 *
 * assert n_samples == d2
 */
__pyx_v_d5 = (__pyx_v_row_means.shape[0]);
/* The four asserts below enforce that mat and centered are square
 * n_samples x n_samples and row_means has n_samples entries; they are
 * compiled out when CYTHON_WITHOUT_ASSERTIONS is defined or Python runs
 * with -O (Py_OptimizeFlag). */
/* "skbio/stats/ordination/_cutils.pyx":52
 * cdef Py_ssize_t d5 = row_means.shape[0]
 *
 * assert n_samples == d2 # <<<<<<<<<<<<<<
 * assert n_samples == d3
 * assert n_samples == d4
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d2) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 52, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":53
 *
 * assert n_samples == d2
 * assert n_samples == d3 # <<<<<<<<<<<<<<
 * assert n_samples == d4
 * assert n_samples == d5
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d3) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 53, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":54
 * assert n_samples == d2
 * assert n_samples == d3
 * assert n_samples == d4 # <<<<<<<<<<<<<<
 * assert n_samples == d5
 *
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d4) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 54, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":55
 * assert n_samples == d3
 * assert n_samples == d4
 * assert n_samples == d5 # <<<<<<<<<<<<<<
 *
 * cdef Py_ssize_t row,col
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d5) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 55, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":61
 * cdef TReal el0
 *
 * cdef long double global_sum = 0.0 # <<<<<<<<<<<<<<
 * for row in prange(n_samples, nogil=True):
 * row_sum = 0.0
 */
__pyx_v_global_sum = 0.0;
/* "skbio/stats/ordination/_cutils.pyx":62
 *
 * cdef long double global_sum = 0.0
 * for row in prange(n_samples, nogil=True): # <<<<<<<<<<<<<<
 * row_sum = 0.0
 *
 */
/* prange body: the GIL is released for the whole parallel region and
 * re-acquired only inside error paths (see the PyGILState_Ensure calls). */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_n_samples;
if ((1 == 0)) abort();
{
/* Sentinels/NaNs below initialize the lastprivate snapshot slots used to
 * propagate loop state out of the OpenMP region on early exit. */
Py_ssize_t __pyx_parallel_temp0 = ((Py_ssize_t)0xbad0bad0);
float __pyx_parallel_temp1 = ((float)__PYX_NAN());
long double __pyx_parallel_temp2 = ((long double)__PYX_NAN());
Py_ssize_t __pyx_parallel_temp3 = ((Py_ssize_t)0xbad0bad0);
long double __pyx_parallel_temp4 = ((long double)__PYX_NAN());
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel reduction(+:__pyx_v_global_sum) private(__pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_col) lastprivate(__pyx_v_el0) firstprivate(__pyx_v_row) lastprivate(__pyx_v_row) lastprivate(__pyx_v_row_sum)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
if (__pyx_parallel_why < 2)
{
__pyx_v_row = (Py_ssize_t)(0 + 1 * __pyx_t_2);
/* Initialize private variables to invalid values */
__pyx_v_col = ((Py_ssize_t)0xbad0bad0);
__pyx_v_el0 = ((float)__PYX_NAN());
__pyx_v_row_sum = ((long double)__PYX_NAN());
/* "skbio/stats/ordination/_cutils.pyx":63
 * cdef long double global_sum = 0.0
 * for row in prange(n_samples, nogil=True):
 * row_sum = 0.0 # <<<<<<<<<<<<<<
 *
 * for col in range(n_samples):
 */
__pyx_v_row_sum = 0.0;
/* "skbio/stats/ordination/_cutils.pyx":65
 * row_sum = 0.0
 *
 * for col in range(n_samples): # <<<<<<<<<<<<<<
 * el0 = mat[row,col]
 * el0 = -0.5*el0*el0
 */
__pyx_t_4 = __pyx_v_n_samples;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_col = __pyx_t_6;
/* "skbio/stats/ordination/_cutils.pyx":66
 *
 * for col in range(n_samples):
 * el0 = mat[row,col] # <<<<<<<<<<<<<<
 * el0 = -0.5*el0*el0
 * centered[row,col] = el0
 */
__pyx_t_7 = __pyx_v_row;
__pyx_t_8 = __pyx_v_col;
__pyx_v_el0 = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_mat.data + __pyx_t_7 * __pyx_v_mat.strides[0]) )) + __pyx_t_8)) )));
/* "skbio/stats/ordination/_cutils.pyx":67
 * for col in range(n_samples):
 * el0 = mat[row,col]
 * el0 = -0.5*el0*el0 # <<<<<<<<<<<<<<
 * centered[row,col] = el0
 * # Note: do not use +=, so it is not flagged as a global reduction
 */
__pyx_v_el0 = ((-0.5 * __pyx_v_el0) * __pyx_v_el0);
/* "skbio/stats/ordination/_cutils.pyx":68
 * el0 = mat[row,col]
 * el0 = -0.5*el0*el0
 * centered[row,col] = el0 # <<<<<<<<<<<<<<
 * # Note: do not use +=, so it is not flagged as a global reduction
 * row_sum = row_sum + el0
 */
__pyx_t_8 = __pyx_v_row;
__pyx_t_7 = __pyx_v_col;
*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_centered.data + __pyx_t_8 * __pyx_v_centered.strides[0]) )) + __pyx_t_7)) )) = __pyx_v_el0;
/* "skbio/stats/ordination/_cutils.pyx":70
 * centered[row,col] = el0
 * # Note: do not use +=, so it is not flagged as a global reduction
 * row_sum = row_sum + el0 # <<<<<<<<<<<<<<
 *
 * global_sum += row_sum
 */
__pyx_v_row_sum = (__pyx_v_row_sum + __pyx_v_el0);
}
/* "skbio/stats/ordination/_cutils.pyx":72
 * row_sum = row_sum + el0
 *
 * global_sum += row_sum # <<<<<<<<<<<<<<
 * row_means[row] = row_sum/n_samples
 *
 */
__pyx_v_global_sum = (__pyx_v_global_sum + __pyx_v_row_sum);
/* "skbio/stats/ordination/_cutils.pyx":73
 *
 * global_sum += row_sum
 * row_means[row] = row_sum/n_samples # <<<<<<<<<<<<<<
 *
 * cdef TReal global_mean = (global_sum/n_samples)/n_samples
 */
/* Raising ZeroDivisionError requires the GIL, hence the Ensure/Release
 * pair around PyErr_SetString inside the nogil loop. */
if (unlikely(__pyx_v_n_samples == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 73, __pyx_L8_error)
}
__pyx_t_7 = __pyx_v_row;
*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_row_means.data) + __pyx_t_7)) )) = (__pyx_v_row_sum / __pyx_v_n_samples);
goto __pyx_L13;
__pyx_L8_error:;
/* Error path: the first thread to fail captures the pending exception and
 * its location into the shared __pyx_parallel_* slots; other threads keep
 * draining iterations as no-ops via __pyx_parallel_why. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L12;
__pyx_L12:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_col;
__pyx_parallel_temp1 = __pyx_v_el0;
__pyx_parallel_temp2 = __pyx_v_global_sum;
__pyx_parallel_temp3 = __pyx_v_row;
__pyx_parallel_temp4 = __pyx_v_row_sum;
}
__pyx_L13:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_col = __pyx_parallel_temp0;
__pyx_v_el0 = __pyx_parallel_temp1;
__pyx_v_global_sum = __pyx_parallel_temp2;
__pyx_v_row = __pyx_parallel_temp3;
__pyx_v_row_sum = __pyx_parallel_temp4;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "skbio/stats/ordination/_cutils.pyx":62
 *
 * cdef long double global_sum = 0.0
 * for row in prange(n_samples, nogil=True): # <<<<<<<<<<<<<<
 * row_sum = 0.0
 *
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "skbio/stats/ordination/_cutils.pyx":75
 * row_means[row] = row_sum/n_samples
 *
 * cdef TReal global_mean = (global_sum/n_samples)/n_samples # <<<<<<<<<<<<<<
 *
 * return global_mean
 */
if (unlikely(__pyx_v_n_samples == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
__PYX_ERR(0, 75, __pyx_L1_error)
}
__pyx_t_9 = (__pyx_v_global_sum / __pyx_v_n_samples);
if (unlikely(__pyx_v_n_samples == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
__PYX_ERR(0, 75, __pyx_L1_error)
}
__pyx_v_global_mean = (__pyx_t_9 / __pyx_v_n_samples);
/* "skbio/stats/ordination/_cutils.pyx":77
 * cdef TReal global_mean = (global_sum/n_samples)/n_samples
 *
 * return global_mean # <<<<<<<<<<<<<<
 *
 * @cython.boundscheck(False)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_10 = PyFloat_FromDouble(__pyx_v_global_mean); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__pyx_r = __pyx_t_10;
__pyx_t_10 = 0;
goto __pyx_L0;
/* "skbio/stats/ordination/_cutils.pyx":23
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def e_matrix_means_cy(TReal[:, ::1] mat, TReal[:, ::1] centered, TReal[::1] row_means): # <<<<<<<<<<<<<<
 * """
 * Compute E matrix from a distance matrix, and
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("skbio.stats.ordination._cutils.e_matrix_means_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Release the borrowed memoryview buffers on every exit path. */
__PYX_XDEC_MEMVIEW(&__pyx_v_mat, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_centered, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_row_means, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * Cython-generated Python-level wrapper for the __pyx_fuse_1 (TReal = double)
 * specialization of e_matrix_means_cy. Unpacks the (mat, centered, row_means)
 * positional/keyword arguments, converts them to writable C-contiguous double
 * memoryview slices, and dispatches to the __pyx_pf_..._8e_matrix_means_cy
 * implementation. Raises TypeError on a bad argument tuple via
 * __Pyx_RaiseArgtupleInvalid.
 * NOTE(review): machine-generated by Cython -- regenerate from _cutils.pyx
 * rather than editing by hand.
 */
static PyObject *__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_9e_matrix_means_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_1__pyx_mdef_5skbio_5stats_10ordination_7_cutils_9e_matrix_means_cy = {"__pyx_fuse_1e_matrix_means_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_9e_matrix_means_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_e_matrix_means_cy};
static PyObject *__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_9e_matrix_means_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_mat = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_centered = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_row_means = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("e_matrix_means_cy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_mat,&__pyx_n_s_centered,&__pyx_n_s_row_means,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Fallthrough switch: grab however many positional args were given,
 * then fill the remaining slots from keywords. */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mat)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_centered)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("e_matrix_means_cy", 1, 3, 3, 1); __PYX_ERR(0, 23, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row_means)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("e_matrix_means_cy", 1, 3, 3, 2); __PYX_ERR(0, 23, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "e_matrix_means_cy") < 0)) __PYX_ERR(0, 23, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Buffer acquisition: requires writable, C-contiguous double buffers;
 * a failed conversion leaves .memview NULL and raises. */
__pyx_v_mat = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_mat.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_v_centered = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_centered.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_v_row_means = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row_means.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("e_matrix_means_cy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.e_matrix_means_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_8e_matrix_means_cy(__pyx_self, __pyx_v_mat, __pyx_v_centered, __pyx_v_row_means);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Cython-generated specialization (__pyx_fuse_1, TReal = double) of
 * skbio.stats.ordination._cutils.e_matrix_means_cy; mirror of the float
 * (__pyx_fuse_0) variant above with double-typed buffers.
 *
 * Writes centered[row,col] = -0.5 * mat[row,col]**2, stores each row mean
 * into row_means[row], and returns the global mean
 * (global_sum / n_samples) / n_samples as a Python float.
 * Accumulation is in long double; the row loop is an OpenMP
 * `prange(..., nogil=True)` with a `+` reduction on global_sum.
 *
 * NOTE(review): machine-generated by Cython -- do not hand-edit the logic;
 * regenerate from _cutils.pyx instead.
 */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_8e_matrix_means_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered, __Pyx_memviewslice __pyx_v_row_means) {
Py_ssize_t __pyx_v_n_samples;
Py_ssize_t __pyx_v_d2;
Py_ssize_t __pyx_v_d3;
Py_ssize_t __pyx_v_d4;
Py_ssize_t __pyx_v_d5;
Py_ssize_t __pyx_v_row;
Py_ssize_t __pyx_v_col;
long double __pyx_v_row_sum;
double __pyx_v_el0;
long double __pyx_v_global_sum;
double __pyx_v_global_mean;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
long double __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_1e_matrix_means_cy", 0);
/* "skbio/stats/ordination/_cutils.pyx":46
 * Global mean value
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 */
__pyx_v_n_samples = (__pyx_v_mat.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":47
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]
 */
__pyx_v_d2 = (__pyx_v_mat.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":48
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d4 = centered.shape[1]
 * cdef Py_ssize_t d5 = row_means.shape[0]
 */
__pyx_v_d3 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":49
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d5 = row_means.shape[0]
 *
 */
__pyx_v_d4 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":50
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]
 * cdef Py_ssize_t d5 = row_means.shape[0] # <<<<<<<<<<<<<<
 *
 * assert n_samples == d2
 */
__pyx_v_d5 = (__pyx_v_row_means.shape[0]);
/* Shape asserts: mat and centered must be n_samples x n_samples and
 * row_means length n_samples; compiled out under -O / without assertions. */
/* "skbio/stats/ordination/_cutils.pyx":52
 * cdef Py_ssize_t d5 = row_means.shape[0]
 *
 * assert n_samples == d2 # <<<<<<<<<<<<<<
 * assert n_samples == d3
 * assert n_samples == d4
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d2) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 52, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":53
 *
 * assert n_samples == d2
 * assert n_samples == d3 # <<<<<<<<<<<<<<
 * assert n_samples == d4
 * assert n_samples == d5
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d3) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 53, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":54
 * assert n_samples == d2
 * assert n_samples == d3
 * assert n_samples == d4 # <<<<<<<<<<<<<<
 * assert n_samples == d5
 *
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d4) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 54, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":55
 * assert n_samples == d3
 * assert n_samples == d4
 * assert n_samples == d5 # <<<<<<<<<<<<<<
 *
 * cdef Py_ssize_t row,col
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d5) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 55, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":61
 * cdef TReal el0
 *
 * cdef long double global_sum = 0.0 # <<<<<<<<<<<<<<
 * for row in prange(n_samples, nogil=True):
 * row_sum = 0.0
 */
__pyx_v_global_sum = 0.0;
/* "skbio/stats/ordination/_cutils.pyx":62
 *
 * cdef long double global_sum = 0.0
 * for row in prange(n_samples, nogil=True): # <<<<<<<<<<<<<<
 * row_sum = 0.0
 *
 */
/* prange body: GIL released for the parallel region; re-acquired only on
 * the error paths below. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_n_samples;
if ((1 == 0)) abort();
{
/* lastprivate snapshot slots used to carry loop state out of the OpenMP
 * region on early exit; initialized to sentinel/NaN values. */
Py_ssize_t __pyx_parallel_temp0 = ((Py_ssize_t)0xbad0bad0);
double __pyx_parallel_temp1 = ((double)__PYX_NAN());
long double __pyx_parallel_temp2 = ((long double)__PYX_NAN());
Py_ssize_t __pyx_parallel_temp3 = ((Py_ssize_t)0xbad0bad0);
long double __pyx_parallel_temp4 = ((long double)__PYX_NAN());
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel reduction(+:__pyx_v_global_sum) private(__pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_col) lastprivate(__pyx_v_el0) firstprivate(__pyx_v_row) lastprivate(__pyx_v_row) lastprivate(__pyx_v_row_sum)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
if (__pyx_parallel_why < 2)
{
__pyx_v_row = (Py_ssize_t)(0 + 1 * __pyx_t_2);
/* Initialize private variables to invalid values */
__pyx_v_col = ((Py_ssize_t)0xbad0bad0);
__pyx_v_el0 = ((double)__PYX_NAN());
__pyx_v_row_sum = ((long double)__PYX_NAN());
/* "skbio/stats/ordination/_cutils.pyx":63
 * cdef long double global_sum = 0.0
 * for row in prange(n_samples, nogil=True):
 * row_sum = 0.0 # <<<<<<<<<<<<<<
 *
 * for col in range(n_samples):
 */
__pyx_v_row_sum = 0.0;
/* "skbio/stats/ordination/_cutils.pyx":65
 * row_sum = 0.0
 *
 * for col in range(n_samples): # <<<<<<<<<<<<<<
 * el0 = mat[row,col]
 * el0 = -0.5*el0*el0
 */
__pyx_t_4 = __pyx_v_n_samples;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_col = __pyx_t_6;
/* "skbio/stats/ordination/_cutils.pyx":66
 *
 * for col in range(n_samples):
 * el0 = mat[row,col] # <<<<<<<<<<<<<<
 * el0 = -0.5*el0*el0
 * centered[row,col] = el0
 */
__pyx_t_7 = __pyx_v_row;
__pyx_t_8 = __pyx_v_col;
__pyx_v_el0 = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_mat.data + __pyx_t_7 * __pyx_v_mat.strides[0]) )) + __pyx_t_8)) )));
/* "skbio/stats/ordination/_cutils.pyx":67
 * for col in range(n_samples):
 * el0 = mat[row,col]
 * el0 = -0.5*el0*el0 # <<<<<<<<<<<<<<
 * centered[row,col] = el0
 * # Note: do not use +=, so it is not flagged as a global reduction
 */
__pyx_v_el0 = ((-0.5 * __pyx_v_el0) * __pyx_v_el0);
/* "skbio/stats/ordination/_cutils.pyx":68
 * el0 = mat[row,col]
 * el0 = -0.5*el0*el0
 * centered[row,col] = el0 # <<<<<<<<<<<<<<
 * # Note: do not use +=, so it is not flagged as a global reduction
 * row_sum = row_sum + el0
 */
__pyx_t_8 = __pyx_v_row;
__pyx_t_7 = __pyx_v_col;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_centered.data + __pyx_t_8 * __pyx_v_centered.strides[0]) )) + __pyx_t_7)) )) = __pyx_v_el0;
/* "skbio/stats/ordination/_cutils.pyx":70
 * centered[row,col] = el0
 * # Note: do not use +=, so it is not flagged as a global reduction
 * row_sum = row_sum + el0 # <<<<<<<<<<<<<<
 *
 * global_sum += row_sum
 */
__pyx_v_row_sum = (__pyx_v_row_sum + __pyx_v_el0);
}
/* "skbio/stats/ordination/_cutils.pyx":72
 * row_sum = row_sum + el0
 *
 * global_sum += row_sum # <<<<<<<<<<<<<<
 * row_means[row] = row_sum/n_samples
 *
 */
__pyx_v_global_sum = (__pyx_v_global_sum + __pyx_v_row_sum);
/* "skbio/stats/ordination/_cutils.pyx":73
 *
 * global_sum += row_sum
 * row_means[row] = row_sum/n_samples # <<<<<<<<<<<<<<
 *
 * cdef TReal global_mean = (global_sum/n_samples)/n_samples
 */
/* Raising ZeroDivisionError needs the GIL, hence the Ensure/Release pair
 * around PyErr_SetString inside the nogil loop. */
if (unlikely(__pyx_v_n_samples == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 73, __pyx_L8_error)
}
__pyx_t_7 = __pyx_v_row;
*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_row_means.data) + __pyx_t_7)) )) = (__pyx_v_row_sum / __pyx_v_n_samples);
goto __pyx_L13;
__pyx_L8_error:;
/* Error path: the first failing thread captures the pending exception into
 * the shared __pyx_parallel_* slots; remaining iterations become no-ops. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L12;
__pyx_L12:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates1)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_col;
__pyx_parallel_temp1 = __pyx_v_el0;
__pyx_parallel_temp2 = __pyx_v_global_sum;
__pyx_parallel_temp3 = __pyx_v_row;
__pyx_parallel_temp4 = __pyx_v_row_sum;
}
__pyx_L13:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_col = __pyx_parallel_temp0;
__pyx_v_el0 = __pyx_parallel_temp1;
__pyx_v_global_sum = __pyx_parallel_temp2;
__pyx_v_row = __pyx_parallel_temp3;
__pyx_v_row_sum = __pyx_parallel_temp4;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "skbio/stats/ordination/_cutils.pyx":62
 *
 * cdef long double global_sum = 0.0
 * for row in prange(n_samples, nogil=True): # <<<<<<<<<<<<<<
 * row_sum = 0.0
 *
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "skbio/stats/ordination/_cutils.pyx":75
 * row_means[row] = row_sum/n_samples
 *
 * cdef TReal global_mean = (global_sum/n_samples)/n_samples # <<<<<<<<<<<<<<
 *
 * return global_mean
 */
if (unlikely(__pyx_v_n_samples == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
__PYX_ERR(0, 75, __pyx_L1_error)
}
__pyx_t_9 = (__pyx_v_global_sum / __pyx_v_n_samples);
if (unlikely(__pyx_v_n_samples == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
__PYX_ERR(0, 75, __pyx_L1_error)
}
__pyx_v_global_mean = (__pyx_t_9 / __pyx_v_n_samples);
/* "skbio/stats/ordination/_cutils.pyx":77
 * cdef TReal global_mean = (global_sum/n_samples)/n_samples
 *
 * return global_mean # <<<<<<<<<<<<<<
 *
 * @cython.boundscheck(False)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_10 = PyFloat_FromDouble(__pyx_v_global_mean); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__pyx_r = __pyx_t_10;
__pyx_t_10 = 0;
goto __pyx_L0;
/* "skbio/stats/ordination/_cutils.pyx":23
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def e_matrix_means_cy(TReal[:, ::1] mat, TReal[:, ::1] centered, TReal[::1] row_means): # <<<<<<<<<<<<<<
 * """
 * Compute E matrix from a distance matrix, and
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("skbio.stats.ordination._cutils.e_matrix_means_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Release the borrowed memoryview buffers on every exit path. */
__PYX_XDEC_MEMVIEW(&__pyx_v_mat, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_centered, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_row_means, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "skbio/stats/ordination/_cutils.pyx":81
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def f_matrix_inplace_cy(TReal[::1] row_means, TReal global_mean, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
* """
* Compute F matrix from E matrix inplace.
*/
/* Python wrapper */
/* Cython-generated entry point for the fused ("TReal") def function
 * f_matrix_inplace_cy.  This wrapper does NOT run the numeric kernel:
 * it unpacks the internal dispatcher arguments
 * (signatures, args, kwargs, defaults) and forwards them to the
 * __pyx_fused_cpdef dispatcher below, which picks the float or double
 * specialization based on the runtime argument types.
 * NOTE: generated by Cython — do not hand-edit; change the .pyx instead. */
static PyObject *__pyx_pw_5skbio_5stats_10ordination_7_cutils_3f_matrix_inplace_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Shared docstring for every specialization of f_matrix_inplace_cy
 * (verbatim from the .pyx source). */
static char __pyx_doc_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy[] = "\n    Compute F matrix from E matrix inplace.\n    Centering step: for each element, the mean of the corresponding\n    row and column are subtracted, and the mean of the whole\n    matrix is added. Eq. 9.21 in Legendre & Legendre 1998.\n\n    Modified from :func:`skbio.stats.ordination.f_matrix_inplace` function,\n\n    Parameters\n    ----------\n    row_means : 1D_array_like\n        Mean values of each row in `centered`\n    global_mean : real\n        Global mean value in `centered`\n    centered : 2D array_like, must be symmetric\n        In, a matrix representing the \"E matrix\" as described above.\n        Out, the centered matrix\n    ";
/* Method table entry registering the dispatcher under the public name
 * "f_matrix_inplace_cy". */
static PyMethodDef __pyx_mdef_5skbio_5stats_10ordination_7_cutils_3f_matrix_inplace_cy = {"f_matrix_inplace_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5skbio_5stats_10ordination_7_cutils_3f_matrix_inplace_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy};
static PyObject *__pyx_pw_5skbio_5stats_10ordination_7_cutils_3f_matrix_inplace_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_signatures = 0;
  PyObject *__pyx_v_args = 0;
  PyObject *__pyx_v_kwargs = 0;
  CYTHON_UNUSED PyObject *__pyx_v_defaults = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0);
  /* Standard Cython argument-unpacking preamble: collect up to 4
   * positional/keyword arguments into values[], raising TypeError on
   * wrong arity or unexpected keywords. */
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0};
    PyObject* values[4] = {0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Fill from positional args first (fallthrough fills lower slots). */
      switch (pos_args) {
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Then fill any remaining slots from keyword args; each missing
       * required argument raises via __Pyx_RaiseArgtupleInvalid. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_args)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 1); __PYX_ERR(0, 81, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); __PYX_ERR(0, 81, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); __PYX_ERR(0, 81, __pyx_L3_error)
        }
      }
      /* Any keyword left over is unknown -> TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) __PYX_ERR(0, 81, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly 4 positional arguments, no keywords. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
    }
    __pyx_v_signatures = values[0];
    __pyx_v_args = values[1];
    __pyx_v_kwargs = values[2];
    __pyx_v_defaults = values[3];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 81, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("skbio.stats.ordination._cutils.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Delegate to the actual fused-type dispatcher. */
  __pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Fused-type dispatcher ("__pyx_fused_cpdef") for f_matrix_inplace_cy.
 * Inspects the dtype of the first user argument (`row_means`) to decide
 * which concrete specialization ("float" or "double") should handle the
 * call, then returns that specialization's function object looked up in
 * the `signatures` dict.  Raises TypeError when no (or more than one)
 * signature matches.
 * NOTE: generated by Cython — do not hand-edit; change the .pyx instead. */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) {
  PyObject *__pyx_v_dest_sig = NULL;
  Py_ssize_t __pyx_v_i;
  PyTypeObject *__pyx_v_ndarray = 0;
  __Pyx_memviewslice __pyx_v_memslice;
  Py_ssize_t __pyx_v_itemsize;
  CYTHON_UNUSED int __pyx_v_dtype_signed;
  char __pyx_v_kind;
  PyObject *__pyx_v_arg = NULL;
  PyObject *__pyx_v_dtype = NULL;
  PyObject *__pyx_v_arg_base = NULL;
  PyObject *__pyx_v_candidates = NULL;
  PyObject *__pyx_v_sig = NULL;
  int __pyx_v_match_found;
  PyObject *__pyx_v_src_sig = NULL;
  PyObject *__pyx_v_dst_type = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  long __pyx_t_7;
  __Pyx_memviewslice __pyx_t_8;
  Py_ssize_t __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyObject *__pyx_t_14 = NULL;
  Py_ssize_t __pyx_t_15;
  Py_ssize_t __pyx_t_16;
  Py_ssize_t __pyx_t_17;
  int __pyx_t_18;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("f_matrix_inplace_cy", 0);
  __Pyx_INCREF(__pyx_v_kwargs);
  /* dest_sig = [None] — one slot per fused type parameter (TReal). */
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(Py_None);
  PyList_SET_ITEM(__pyx_t_1, 0, Py_None);
  __pyx_v_dest_sig = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;
  /* Normalize an empty kwargs dict to None. */
  __pyx_t_3 = (__pyx_v_kwargs != Py_None);
  __pyx_t_4 = (__pyx_t_3 != 0);
  if (__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_kwargs); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
  __pyx_t_3 = ((!__pyx_t_4) != 0);
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {
    __Pyx_INCREF(Py_None);
    __Pyx_DECREF_SET(__pyx_v_kwargs, Py_None);
  }
  /* ndarray = numpy.ndarray if numpy is importable, else None. */
  __pyx_t_1 = ((PyObject *)__Pyx_ImportNumPyArrayTypeIfAvailable()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_ndarray = ((PyTypeObject*)__pyx_t_1);
  __pyx_t_1 = 0;
  __pyx_v_itemsize = -1L;
  /* Pick the argument used for dispatch: args[0] if positional,
   * otherwise kwargs['row_means']; no argument at all -> TypeError. */
  if (unlikely(__pyx_v_args == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(0, 81, __pyx_L1_error)
  }
  __pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 81, __pyx_L1_error)
  __pyx_t_2 = ((0 < __pyx_t_5) != 0);
  if (__pyx_t_2) {
    if (unlikely(__pyx_v_args == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
      __PYX_ERR(0, 81, __pyx_L1_error)
    }
    __pyx_t_1 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 0);
    __Pyx_INCREF(__pyx_t_1);
    __pyx_v_arg = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L6;
  }
  __pyx_t_3 = (__pyx_v_kwargs != Py_None);
  __pyx_t_4 = (__pyx_t_3 != 0);
  if (__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L7_bool_binop_done;
  }
  if (unlikely(__pyx_v_kwargs == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
    __PYX_ERR(0, 81, __pyx_L1_error)
  }
  __pyx_t_4 = (__Pyx_PyDict_ContainsTF(__pyx_n_s_row_means, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
  __pyx_t_3 = (__pyx_t_4 != 0);
  __pyx_t_2 = __pyx_t_3;
  __pyx_L7_bool_binop_done:;
  if (__pyx_t_2) {
    if (unlikely(__pyx_v_kwargs == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
      __PYX_ERR(0, 81, __pyx_L1_error)
    }
    __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_row_means); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_v_arg = __pyx_t_1;
    __pyx_t_1 = 0;
    goto __pyx_L6;
  }
  /*else*/ {
    /* Neither positional nor keyword form supplied row_means:
     * raise "Expected at least %d argument%s, got %d". */
    if (unlikely(__pyx_v_args == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
      __PYX_ERR(0, 81, __pyx_L1_error)
    }
    __pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 81, __pyx_L1_error)
    __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_INCREF(__pyx_int_3);
    __Pyx_GIVEREF(__pyx_int_3);
    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_int_3);
    __Pyx_INCREF(__pyx_n_s_s);
    __Pyx_GIVEREF(__pyx_n_s_s);
    PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_n_s_s);
    __Pyx_GIVEREF(__pyx_t_1);
    PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_1);
    __pyx_t_1 = 0;
    __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_argument_s_g, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 81, __pyx_L1_error)
  }
  __pyx_L6:;
  /* Type-sniffing loop (runs once; the while(1)/break shape is a Cython
   * artifact): determine whether `arg` is a float32 or float64 buffer. */
  while (1) {
    __pyx_t_2 = (__pyx_v_ndarray != ((PyTypeObject*)Py_None));
    __pyx_t_3 = (__pyx_t_2 != 0);
    if (__pyx_t_3) {
      /* Prefer the ndarray dtype; for a memoryview fall back to the
       * dtype of its .base array when available. */
      __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray);
      __pyx_t_2 = (__pyx_t_3 != 0);
      if (__pyx_t_2) {
        __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_6);
        __pyx_v_dtype = __pyx_t_6;
        __pyx_t_6 = 0;
        goto __pyx_L12;
      }
      __pyx_t_2 = __pyx_memoryview_check(__pyx_v_arg);
      __pyx_t_3 = (__pyx_t_2 != 0);
      if (__pyx_t_3) {
        __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_6);
        __pyx_v_arg_base = __pyx_t_6;
        __pyx_t_6 = 0;
        __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray);
        __pyx_t_2 = (__pyx_t_3 != 0);
        if (__pyx_t_2) {
          __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
          __Pyx_GOTREF(__pyx_t_6);
          __pyx_v_dtype = __pyx_t_6;
          __pyx_t_6 = 0;
          goto __pyx_L13;
        }
        /*else*/ {
          __Pyx_INCREF(Py_None);
          __pyx_v_dtype = Py_None;
        }
        __pyx_L13:;
        goto __pyx_L12;
      }
      /*else*/ {
        __Pyx_INCREF(Py_None);
        __pyx_v_dtype = Py_None;
      }
      __pyx_L12:;
      __pyx_v_itemsize = -1L;
      if (__pyx_t_3) {
        /* Read dtype.itemsize and dtype.kind; 'f' + matching itemsize
         * + ndim == 1 selects the float or double signature directly. */
        __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_6);
        __pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        __pyx_v_itemsize = __pyx_t_5;
        __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_6);
        __pyx_t_7 = __Pyx_PyObject_Ord(__pyx_t_6); if (unlikely(__pyx_t_7 == ((long)(long)(Py_UCS4)-1))) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        __pyx_v_kind = __pyx_t_7;
        __pyx_v_dtype_signed = (__pyx_v_kind == 'i');
        switch (__pyx_v_kind) {
          case 'i':
          case 'u':
          break;
          case 'f':
          __pyx_t_2 = (((sizeof(float)) == __pyx_v_itemsize) != 0);
          if (__pyx_t_2) {
          } else {
            __pyx_t_3 = __pyx_t_2;
            goto __pyx_L16_bool_binop_done;
          }
          __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
          __Pyx_GOTREF(__pyx_t_6);
          __pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L1_error)
          __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
          __pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 1) != 0);
          __pyx_t_3 = __pyx_t_2;
          __pyx_L16_bool_binop_done:;
          if (__pyx_t_3) {
            if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
            goto __pyx_L10_break;
          }
          __pyx_t_2 = (((sizeof(double)) == __pyx_v_itemsize) != 0);
          if (__pyx_t_2) {
          } else {
            __pyx_t_3 = __pyx_t_2;
            goto __pyx_L19_bool_binop_done;
          }
          __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
          __Pyx_GOTREF(__pyx_t_6);
          __pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L1_error)
          __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
          __pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 1) != 0);
          __pyx_t_3 = __pyx_t_2;
          __pyx_L19_bool_binop_done:;
          if (__pyx_t_3) {
            if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
            goto __pyx_L10_break;
          }
          break;
          case 'c':
          break;
          case 'O':
          break;
          default: break;
        }
      }
    }
    /* dtype inspection inconclusive: try actually coercing `arg` to a
     * contiguous float (then double) memoryview slice. */
    __pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
    if (!__pyx_t_2) {
    } else {
      __pyx_t_3 = __pyx_t_2;
      goto __pyx_L22_bool_binop_done;
    }
    __pyx_t_2 = ((__pyx_v_itemsize == (sizeof(float))) != 0);
    __pyx_t_3 = __pyx_t_2;
    __pyx_L22_bool_binop_done:;
    if (__pyx_t_3) {
      __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(__pyx_v_arg, 0);
      __pyx_v_memslice = __pyx_t_8;
      __pyx_t_3 = (__pyx_v_memslice.memview != 0);
      if (__pyx_t_3) {
        __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
        if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
        goto __pyx_L10_break;
      }
      /*else*/ {
        PyErr_Clear();
      }
    }
    __pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
    if (!__pyx_t_2) {
    } else {
      __pyx_t_3 = __pyx_t_2;
      goto __pyx_L26_bool_binop_done;
    }
    __pyx_t_2 = ((__pyx_v_itemsize == (sizeof(double))) != 0);
    __pyx_t_3 = __pyx_t_2;
    __pyx_L26_bool_binop_done:;
    if (__pyx_t_3) {
      __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_arg, 0);
      __pyx_v_memslice = __pyx_t_8;
      __pyx_t_3 = (__pyx_v_memslice.memview != 0);
      if (__pyx_t_3) {
        __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
        if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
        goto __pyx_L10_break;
      }
      /*else*/ {
        PyErr_Clear();
      }
    }
    /* No match: dest_sig[0] stays None; no signature will match below. */
    if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
    goto __pyx_L10_break;
  }
  __pyx_L10_break:;
  /* Collect every registered signature whose type list matches dest_sig. */
  __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_v_candidates = ((PyObject*)__pyx_t_6);
  __pyx_t_6 = 0;
  __pyx_t_5 = 0;
  if (unlikely(__pyx_v_signatures == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
    __PYX_ERR(0, 81, __pyx_L1_error)
  }
  __pyx_t_1 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_9), (&__pyx_t_10)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_6);
  __pyx_t_6 = __pyx_t_1;
  __pyx_t_1 = 0;
  while (1) {
    __pyx_t_11 = __Pyx_dict_iter_next(__pyx_t_6, __pyx_t_9, &__pyx_t_5, &__pyx_t_1, NULL, NULL, __pyx_t_10);
    if (unlikely(__pyx_t_11 == 0)) break;
    if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_1);
    __pyx_t_1 = 0;
    __pyx_v_match_found = 0;
    /* src_sig = sig.strip('()').split('|') — the signature's type names. */
    __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_13);
    __pyx_t_14 = NULL;
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
      __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_13);
      if (likely(__pyx_t_14)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
        __Pyx_INCREF(__pyx_t_14);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_13, function);
      }
    }
    __pyx_t_12 = (__pyx_t_14) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_14, __pyx_kp_s_) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s_);
    __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
    if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_12);
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_n_s_split); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_13);
    __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
    __pyx_t_12 = NULL;
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
      __pyx_t_12 = PyMethod_GET_SELF(__pyx_t_13);
      if (likely(__pyx_t_12)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
        __Pyx_INCREF(__pyx_t_12);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_13, function);
      }
    }
    __pyx_t_1 = (__pyx_t_12) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_12, __pyx_kp_s__2) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s__2);
    __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
    __Pyx_XDECREF_SET(__pyx_v_src_sig, __pyx_t_1);
    __pyx_t_1 = 0;
    /* Compare each non-None slot of dest_sig against src_sig. */
    __pyx_t_15 = PyList_GET_SIZE(__pyx_v_dest_sig); if (unlikely(__pyx_t_15 == ((Py_ssize_t)-1))) __PYX_ERR(0, 81, __pyx_L1_error)
    __pyx_t_16 = __pyx_t_15;
    for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
      __pyx_v_i = __pyx_t_17;
      __pyx_t_1 = PyList_GET_ITEM(__pyx_v_dest_sig, __pyx_v_i);
      __Pyx_INCREF(__pyx_t_1);
      __Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_1);
      __pyx_t_1 = 0;
      __pyx_t_3 = (__pyx_v_dst_type != Py_None);
      __pyx_t_2 = (__pyx_t_3 != 0);
      if (__pyx_t_2) {
        __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_src_sig, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_1);
        __pyx_t_13 = PyObject_RichCompare(__pyx_t_1, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
        __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
        __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
        if (__pyx_t_2) {
          __pyx_v_match_found = 1;
          goto __pyx_L34;
        }
        /*else*/ {
          __pyx_v_match_found = 0;
          goto __pyx_L32_break;
        }
        __pyx_L34:;
      }
    }
    __pyx_L32_break:;
    if (__pyx_t_2) {
      __pyx_t_18 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_18 == ((int)-1))) __PYX_ERR(0, 81, __pyx_L1_error)
    }
  }
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  /* Exactly one candidate must remain; 0 or >1 is a TypeError. */
  __pyx_t_2 = (PyList_GET_SIZE(__pyx_v_candidates) != 0);
  __pyx_t_3 = ((!__pyx_t_2) != 0);
  if (__pyx_t_3) {
    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 81, __pyx_L1_error)
  }
  __pyx_t_9 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 81, __pyx_L1_error)
  __pyx_t_3 = ((__pyx_t_9 > 1) != 0);
  if (__pyx_t_3) {
    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_Raise(__pyx_t_6, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __PYX_ERR(0, 81, __pyx_L1_error)
  }
  /*else*/ {
    /* Return the specialization function: signatures[candidates[0]]. */
    __Pyx_XDECREF(__pyx_r);
    if (unlikely(__pyx_v_signatures == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
      __PYX_ERR(0, 81, __pyx_L1_error)
    }
    __pyx_t_6 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 81, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_r = __pyx_t_6;
    __pyx_t_6 = 0;
    goto __pyx_L0;
  }
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_12);
  __Pyx_XDECREF(__pyx_t_13);
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_AddTraceback("skbio.stats.ordination._cutils.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_dest_sig);
  __Pyx_XDECREF(__pyx_v_ndarray);
  __Pyx_XDECREF(__pyx_v_arg);
  __Pyx_XDECREF(__pyx_v_dtype);
  __Pyx_XDECREF(__pyx_v_arg_base);
  __Pyx_XDECREF(__pyx_v_candidates);
  __Pyx_XDECREF(__pyx_v_sig);
  __Pyx_XDECREF(__pyx_v_src_sig);
  __Pyx_XDECREF(__pyx_v_dst_type);
  __Pyx_XDECREF(__pyx_v_kwargs);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
/* float32 ("__pyx_fuse_0") specialization wrapper of f_matrix_inplace_cy.
 * Parses (row_means, global_mean, centered), coerces row_means to a
 * writable contiguous 1-D float memoryview, global_mean to a C float, and
 * centered to a writable C-contiguous 2-D float memoryview, then calls the
 * float kernel.
 * NOTE: generated by Cython — do not hand-edit; change the .pyx instead. */
static PyObject *__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_13f_matrix_inplace_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_0__pyx_mdef_5skbio_5stats_10ordination_7_cutils_13f_matrix_inplace_cy = {"__pyx_fuse_0f_matrix_inplace_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_13f_matrix_inplace_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy};
static PyObject *__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_13f_matrix_inplace_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  __Pyx_memviewslice __pyx_v_row_means = { 0, 0, { 0 }, { 0 }, { 0 } };
  float __pyx_v_global_mean;
  __Pyx_memviewslice __pyx_v_centered = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("f_matrix_inplace_cy (wrapper)", 0);
  /* Standard Cython argument unpacking for exactly 3 arguments
   * (row_means, global_mean, centered), positional or keyword. */
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_row_means,&__pyx_n_s_global_mean,&__pyx_n_s_centered,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row_means)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_global_mean)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("f_matrix_inplace_cy", 1, 3, 3, 1); __PYX_ERR(0, 81, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_centered)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("f_matrix_inplace_cy", 1, 3, 3, 2); __PYX_ERR(0, 81, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "f_matrix_inplace_cy") < 0)) __PYX_ERR(0, 81, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
    }
    /* Coerce the three Python objects to their C-level forms; PyBUF_WRITABLE
     * enforces that the buffers are mutable (the kernel writes in place). */
    __pyx_v_row_means = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row_means.memview)) __PYX_ERR(0, 81, __pyx_L3_error)
    __pyx_v_global_mean = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_global_mean == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L3_error)
    __pyx_v_centered = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_centered.memview)) __PYX_ERR(0, 81, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("f_matrix_inplace_cy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 81, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("skbio.stats.ordination._cutils.f_matrix_inplace_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Invoke the float32 computational kernel. */
  __pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_12f_matrix_inplace_cy(__pyx_self, __pyx_v_row_means, __pyx_v_global_mean, __pyx_v_centered);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* float32 computational kernel for f_matrix_inplace_cy.
 * In-place Gower double-centering (Eq. 9.21, Legendre & Legendre 1998):
 *   centered[row,col] += global_mean - row_means[row] - row_means[col]
 * The outer loop is a Cython prange (OpenMP parallel-for, GIL released)
 * over row tiles of 24; the inner loops tile columns by 24 to keep
 * row_means accesses cache-local.  Requires centered to be a square
 * n x n C-contiguous matrix with n == len(row_means) (asserted below).
 * NOTE: generated by Cython — do not hand-edit; change the .pyx instead. */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_12f_matrix_inplace_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_row_means, float __pyx_v_global_mean, __Pyx_memviewslice __pyx_v_centered) {
  Py_ssize_t __pyx_v_n_samples;
  Py_ssize_t __pyx_v_d2;
  Py_ssize_t __pyx_v_d3;
  Py_ssize_t __pyx_v_trow;
  Py_ssize_t __pyx_v_tcol;
  Py_ssize_t __pyx_v_row;
  Py_ssize_t __pyx_v_col;
  Py_ssize_t __pyx_v_trow_max;
  Py_ssize_t __pyx_v_tcol_max;
  float __pyx_v_gr_mean;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  Py_ssize_t __pyx_t_7;
  Py_ssize_t __pyx_t_8;
  Py_ssize_t __pyx_t_9;
  Py_ssize_t __pyx_t_10;
  Py_ssize_t __pyx_t_11;
  Py_ssize_t __pyx_t_12;
  Py_ssize_t __pyx_t_13;
  Py_ssize_t __pyx_t_14;
  Py_ssize_t __pyx_t_15;
  Py_ssize_t __pyx_t_16;
  Py_ssize_t __pyx_t_17;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_fuse_0f_matrix_inplace_cy", 0);
/* "skbio/stats/ordination/_cutils.pyx":100
 *        Out, the centered matrix
 *     """
 *     cdef Py_ssize_t n_samples = centered.shape[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t d2 = centered.shape[1]
 *     cdef Py_ssize_t d3 = row_means.shape[0]
 */
  __pyx_v_n_samples = (__pyx_v_centered.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":101
 *     """
 *     cdef Py_ssize_t n_samples = centered.shape[0]
 *     cdef Py_ssize_t d2 = centered.shape[1]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t d3 = row_means.shape[0]
 * 
 */
  __pyx_v_d2 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":102
 *     cdef Py_ssize_t n_samples = centered.shape[0]
 *     cdef Py_ssize_t d2 = centered.shape[1]
 *     cdef Py_ssize_t d3 = row_means.shape[0]             # <<<<<<<<<<<<<<
 * 
 *     assert n_samples == d2
 */
  __pyx_v_d3 = (__pyx_v_row_means.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":104
 *     cdef Py_ssize_t d3 = row_means.shape[0]
 * 
 *     assert n_samples == d2             # <<<<<<<<<<<<<<
 *     assert n_samples == d3
 * 
 */
  /* Shape guard: centered must be square (skipped under python -O). */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(!Py_OptimizeFlag)) {
    if (unlikely(!((__pyx_v_n_samples == __pyx_v_d2) != 0))) {
      PyErr_SetNone(PyExc_AssertionError);
      __PYX_ERR(0, 104, __pyx_L1_error)
    }
  }
  #endif
/* "skbio/stats/ordination/_cutils.pyx":105
 * 
 *     assert n_samples == d2
 *     assert n_samples == d3             # <<<<<<<<<<<<<<
 * 
 *     cdef Py_ssize_t trow,tcol,row,col
 */
  /* Shape guard: row_means length must equal the matrix dimension. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(!Py_OptimizeFlag)) {
    if (unlikely(!((__pyx_v_n_samples == __pyx_v_d3) != 0))) {
      PyErr_SetNone(PyExc_AssertionError);
      __PYX_ERR(0, 105, __pyx_L1_error)
    }
  }
  #endif
/* "skbio/stats/ordination/_cutils.pyx":112
 * 
 *     # use a tiled pattern to maximize locality of row_means
 *     for trow in prange(0, n_samples, 24, nogil=True):             # <<<<<<<<<<<<<<
 *         trow_max = min(trow+24, n_samples)
 * 
 */
  /* Release the GIL for the duration of the parallel region. */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
        __pyx_t_1 = __pyx_v_n_samples;
        if ((24 == 0)) abort();
        {
            #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
                #undef likely
                #undef unlikely
                #define likely(x)   (x)
                #define unlikely(x) (x)
            #endif
            /* Number of row tiles: ceil(n_samples / 24). */
            __pyx_t_3 = (__pyx_t_1 - 0 + 24 - 24/abs(24)) / 24;
            if (__pyx_t_3 > 0)
            {
                #ifdef _OPENMP
                #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
                #endif /* _OPENMP */
                {
                    #ifdef _OPENMP
                    #pragma omp for lastprivate(__pyx_v_col) lastprivate(__pyx_v_gr_mean) lastprivate(__pyx_v_row) lastprivate(__pyx_v_tcol) lastprivate(__pyx_v_tcol_max) firstprivate(__pyx_v_trow) lastprivate(__pyx_v_trow) lastprivate(__pyx_v_trow_max)
                    #endif /* _OPENMP */
                    /* Each OpenMP thread handles whole row tiles of 24 rows. */
                    for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
                        {
                            __pyx_v_trow = (Py_ssize_t)(0 + 24 * __pyx_t_2);
                            /* Initialize private variables to invalid values */
                            __pyx_v_col = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_gr_mean = ((float)__PYX_NAN());
                            __pyx_v_row = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_tcol = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_tcol_max = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_trow_max = ((Py_ssize_t)0xbad0bad0);
/* "skbio/stats/ordination/_cutils.pyx":113
 *     # use a tiled pattern to maximize locality of row_means
 *     for trow in prange(0, n_samples, 24, nogil=True):
 *         trow_max = min(trow+24, n_samples)             # <<<<<<<<<<<<<<
 * 
 *         for tcol in range(0, n_samples, 24):
 */
                            /* Clamp the last row tile to the matrix edge. */
                            __pyx_t_4 = __pyx_v_n_samples;
                            __pyx_t_5 = (__pyx_v_trow + 24);
                            if (((__pyx_t_4 < __pyx_t_5) != 0)) {
                              __pyx_t_6 = __pyx_t_4;
                            } else {
                              __pyx_t_6 = __pyx_t_5;
                            }
                            __pyx_v_trow_max = __pyx_t_6;
/* "skbio/stats/ordination/_cutils.pyx":115
 *         trow_max = min(trow+24, n_samples)
 * 
 *         for tcol in range(0, n_samples, 24):             # <<<<<<<<<<<<<<
 *             tcol_max = min(tcol+24, n_samples)
 * 
 */
                            __pyx_t_6 = __pyx_v_n_samples;
                            __pyx_t_4 = __pyx_t_6;
                            for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=24) {
                              __pyx_v_tcol = __pyx_t_5;
/* "skbio/stats/ordination/_cutils.pyx":116
 * 
 *         for tcol in range(0, n_samples, 24):
 *             tcol_max = min(tcol+24, n_samples)             # <<<<<<<<<<<<<<
 * 
 *             for row in range(trow, trow_max, 1):
 */
                              /* Clamp the last column tile to the matrix edge. */
                              __pyx_t_7 = __pyx_v_n_samples;
                              __pyx_t_8 = (__pyx_v_tcol + 24);
                              if (((__pyx_t_7 < __pyx_t_8) != 0)) {
                                __pyx_t_9 = __pyx_t_7;
                              } else {
                                __pyx_t_9 = __pyx_t_8;
                              }
                              __pyx_v_tcol_max = __pyx_t_9;
/* "skbio/stats/ordination/_cutils.pyx":118
 *             tcol_max = min(tcol+24, n_samples)
 * 
 *             for row in range(trow, trow_max, 1):             # <<<<<<<<<<<<<<
 *                 gr_mean = global_mean - row_means[row]
 * 
 */
                              __pyx_t_9 = __pyx_v_trow_max;
                              __pyx_t_7 = __pyx_t_9;
                              for (__pyx_t_8 = __pyx_v_trow; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
                                __pyx_v_row = __pyx_t_8;
/* "skbio/stats/ordination/_cutils.pyx":119
 * 
 *             for row in range(trow, trow_max, 1):
 *                 gr_mean = global_mean - row_means[row]             # <<<<<<<<<<<<<<
 * 
 *                 for col in range(tcol, tcol_max, 1):
 */
                                /* Hoist the row-dependent part of the update
                                 * out of the innermost column loop. */
                                __pyx_t_10 = __pyx_v_row;
                                __pyx_v_gr_mean = (__pyx_v_global_mean - (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_row_means.data) + __pyx_t_10)) ))));
/* "skbio/stats/ordination/_cutils.pyx":121
 *                 gr_mean = global_mean - row_means[row]
 * 
 *                 for col in range(tcol, tcol_max, 1):             # <<<<<<<<<<<<<<
 *                     # Note: do not use +=, so it is not flagged as a global reduction
 *                     centered[row,col] = centered[row,col] + (gr_mean - row_means[col])
 */
                                __pyx_t_11 = __pyx_v_tcol_max;
                                __pyx_t_12 = __pyx_t_11;
                                for (__pyx_t_13 = __pyx_v_tcol; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
                                  __pyx_v_col = __pyx_t_13;
/* "skbio/stats/ordination/_cutils.pyx":123
 *                 for col in range(tcol, tcol_max, 1):
 *                     # Note: do not use +=, so it is not flagged as a global reduction
 *                     centered[row,col] = centered[row,col] + (gr_mean - row_means[col])             # <<<<<<<<<<<<<<
 * 
 * @cython.boundscheck(False)
 */
                                  /* The double-centering update itself; raw
                                   * pointer arithmetic = unchecked memoryview
                                   * indexing (boundscheck/wraparound off). */
                                  __pyx_t_10 = __pyx_v_row;
                                  __pyx_t_14 = __pyx_v_col;
                                  __pyx_t_15 = __pyx_v_col;
                                  __pyx_t_16 = __pyx_v_row;
                                  __pyx_t_17 = __pyx_v_col;
                                  *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_centered.data + __pyx_t_16 * __pyx_v_centered.strides[0]) )) + __pyx_t_17)) )) = ((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_centered.data + __pyx_t_10 * __pyx_v_centered.strides[0]) )) + __pyx_t_14)) ))) + (__pyx_v_gr_mean - (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_row_means.data) + __pyx_t_15)) )))));
                                }
                              }
                            }
                        }
                    }
                }
            }
        }
        #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
            #undef likely
            #undef unlikely
            #define likely(x)   __builtin_expect(!!(x), 1)
            #define unlikely(x) __builtin_expect(!!(x), 0)
        #endif
      }
/* "skbio/stats/ordination/_cutils.pyx":112
 * 
 *     # use a tiled pattern to maximize locality of row_means
 *     for trow in prange(0, n_samples, 24, nogil=True):             # <<<<<<<<<<<<<<
 *         trow_max = min(trow+24, n_samples)
 * 
 */
      /* Re-acquire the GIL after the parallel region. */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }
/* "skbio/stats/ordination/_cutils.pyx":81
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def f_matrix_inplace_cy(TReal[::1] row_means, TReal global_mean, TReal[:, ::1] centered):             # <<<<<<<<<<<<<<
 *     """
 *     Compute F matrix from E matrix inplace.
 */
  /* function exit code */
  /* In-place function: always returns None on success. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("skbio.stats.ordination._cutils.f_matrix_inplace_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  /* Release the borrowed buffer references. */
  __PYX_XDEC_MEMVIEW(&__pyx_v_row_means, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_centered, 1);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
/* NOTE(review): Cython-GENERATED code (from skbio/stats/ordination/_cutils.pyx,
 * fused specialization "double" of f_matrix_inplace_cy). Do not hand-edit;
 * fix the .pyx and regenerate. Comments below are for orientation only.
 * This wrapper unpacks (row_means, global_mean, centered) from args/kwargs,
 * converts them to C memoryview slices / double, and calls the C impl. */
static PyObject *__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_15f_matrix_inplace_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_1__pyx_mdef_5skbio_5stats_10ordination_7_cutils_15f_matrix_inplace_cy = {"__pyx_fuse_1f_matrix_inplace_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_15f_matrix_inplace_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_2f_matrix_inplace_cy};
static PyObject *__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_15f_matrix_inplace_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_row_means = { 0, 0, { 0 }, { 0 }, { 0 } };
double __pyx_v_global_mean;
__Pyx_memviewslice __pyx_v_centered = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("f_matrix_inplace_cy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_row_means,&__pyx_n_s_global_mean,&__pyx_n_s_centered,0};
PyObject* values[3] = {0,0,0};
/* Slow path: keyword dict present — gather positionals, then fill the
 * remaining slots from kwargs; all three arguments are required. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row_means)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_global_mean)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("f_matrix_inplace_cy", 1, 3, 3, 1); __PYX_ERR(0, 81, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_centered)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("f_matrix_inplace_cy", 1, 3, 3, 2); __PYX_ERR(0, 81, __pyx_L3_error)
}
}
/* Any keyword left over is either unknown or a duplicate — reject. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "f_matrix_inplace_cy") < 0)) __PYX_ERR(0, 81, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly three positional arguments, no kwargs. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Coerce to C types: 1-D contiguous double view, C double, 2-D C-contiguous
 * double view; requests PyBUF_WRITABLE since centered is mutated in place. */
__pyx_v_row_means = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row_means.memview)) __PYX_ERR(0, 81, __pyx_L3_error)
__pyx_v_global_mean = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_global_mean == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L3_error)
__pyx_v_centered = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_centered.memview)) __PYX_ERR(0, 81, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("f_matrix_inplace_cy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 81, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.f_matrix_inplace_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_14f_matrix_inplace_cy(__pyx_self, __pyx_v_row_means, __pyx_v_global_mean, __pyx_v_centered);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED "double" specialization of
 * f_matrix_inplace_cy (see embedded .pyx line markers). Do not hand-edit;
 * regenerate from the .pyx. Per the inlined .pyx source, it computes, in
 * place: centered[row,col] += global_mean - row_means[row] - row_means[col],
 * in 24x24 tiles, with the outer row-tile loop parallelized via OpenMP
 * (Cython prange, GIL released around the loop). */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_14f_matrix_inplace_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_row_means, double __pyx_v_global_mean, __Pyx_memviewslice __pyx_v_centered) {
Py_ssize_t __pyx_v_n_samples;
Py_ssize_t __pyx_v_d2;
Py_ssize_t __pyx_v_d3;
Py_ssize_t __pyx_v_trow;
Py_ssize_t __pyx_v_tcol;
Py_ssize_t __pyx_v_row;
Py_ssize_t __pyx_v_col;
Py_ssize_t __pyx_v_trow_max;
Py_ssize_t __pyx_v_tcol_max;
double __pyx_v_gr_mean;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
/* __pyx_t_* are Cython scratch temporaries; their exact reuse pattern is
 * significant (they are privatized in the OpenMP pragmas below). */
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_1f_matrix_inplace_cy", 0);
/* "skbio/stats/ordination/_cutils.pyx":100
 * Out, the centered matrix
 * """
 * cdef Py_ssize_t n_samples = centered.shape[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d2 = centered.shape[1]
 * cdef Py_ssize_t d3 = row_means.shape[0]
 */
__pyx_v_n_samples = (__pyx_v_centered.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":101
 * """
 * cdef Py_ssize_t n_samples = centered.shape[0]
 * cdef Py_ssize_t d2 = centered.shape[1] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d3 = row_means.shape[0]
 *
 */
__pyx_v_d2 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":102
 * cdef Py_ssize_t n_samples = centered.shape[0]
 * cdef Py_ssize_t d2 = centered.shape[1]
 * cdef Py_ssize_t d3 = row_means.shape[0] # <<<<<<<<<<<<<<
 *
 * assert n_samples == d2
 */
__pyx_v_d3 = (__pyx_v_row_means.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":104
 * cdef Py_ssize_t d3 = row_means.shape[0]
 *
 * assert n_samples == d2 # <<<<<<<<<<<<<<
 * assert n_samples == d3
 *
 */
/* Shape checks: centered must be square and row_means must match its
 * row count. Compiled out with CYTHON_WITHOUT_ASSERTIONS / python -O. */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d2) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 104, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":105
 *
 * assert n_samples == d2
 * assert n_samples == d3 # <<<<<<<<<<<<<<
 *
 * cdef Py_ssize_t trow,tcol,row,col
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d3) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 105, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":112
 *
 * # use a tiled pattern to maximize locality of row_means
 * for trow in prange(0, n_samples, 24, nogil=True): # <<<<<<<<<<<<<<
 * trow_max = min(trow+24, n_samples)
 *
 */
{
/* Release the GIL for the duration of the parallel region. */
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_n_samples;
if ((24 == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
/* Number of 24-wide row tiles (ceil(n_samples / 24)). */
__pyx_t_3 = (__pyx_t_1 - 0 + 24 - 24/abs(24)) / 24;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_col) lastprivate(__pyx_v_gr_mean) lastprivate(__pyx_v_row) lastprivate(__pyx_v_tcol) lastprivate(__pyx_v_tcol_max) firstprivate(__pyx_v_trow) lastprivate(__pyx_v_trow) lastprivate(__pyx_v_trow_max)
#endif /* _OPENMP */
/* One iteration per row tile; trow = tile_index * 24. */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
{
__pyx_v_trow = (Py_ssize_t)(0 + 24 * __pyx_t_2);
/* Initialize private variables to invalid values */
__pyx_v_col = ((Py_ssize_t)0xbad0bad0);
__pyx_v_gr_mean = ((double)__PYX_NAN());
__pyx_v_row = ((Py_ssize_t)0xbad0bad0);
__pyx_v_tcol = ((Py_ssize_t)0xbad0bad0);
__pyx_v_tcol_max = ((Py_ssize_t)0xbad0bad0);
__pyx_v_trow_max = ((Py_ssize_t)0xbad0bad0);
/* "skbio/stats/ordination/_cutils.pyx":113
 * # use a tiled pattern to maximize locality of row_means
 * for trow in prange(0, n_samples, 24, nogil=True):
 * trow_max = min(trow+24, n_samples) # <<<<<<<<<<<<<<
 *
 * for tcol in range(0, n_samples, 24):
 */
__pyx_t_4 = __pyx_v_n_samples;
__pyx_t_5 = (__pyx_v_trow + 24);
if (((__pyx_t_4 < __pyx_t_5) != 0)) {
__pyx_t_6 = __pyx_t_4;
} else {
__pyx_t_6 = __pyx_t_5;
}
__pyx_v_trow_max = __pyx_t_6;
/* "skbio/stats/ordination/_cutils.pyx":115
 * trow_max = min(trow+24, n_samples)
 *
 * for tcol in range(0, n_samples, 24): # <<<<<<<<<<<<<<
 * tcol_max = min(tcol+24, n_samples)
 *
 */
__pyx_t_6 = __pyx_v_n_samples;
__pyx_t_4 = __pyx_t_6;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=24) {
__pyx_v_tcol = __pyx_t_5;
/* "skbio/stats/ordination/_cutils.pyx":116
 *
 * for tcol in range(0, n_samples, 24):
 * tcol_max = min(tcol+24, n_samples) # <<<<<<<<<<<<<<
 *
 * for row in range(trow, trow_max, 1):
 */
__pyx_t_7 = __pyx_v_n_samples;
__pyx_t_8 = (__pyx_v_tcol + 24);
if (((__pyx_t_7 < __pyx_t_8) != 0)) {
__pyx_t_9 = __pyx_t_7;
} else {
__pyx_t_9 = __pyx_t_8;
}
__pyx_v_tcol_max = __pyx_t_9;
/* "skbio/stats/ordination/_cutils.pyx":118
 * tcol_max = min(tcol+24, n_samples)
 *
 * for row in range(trow, trow_max, 1): # <<<<<<<<<<<<<<
 * gr_mean = global_mean - row_means[row]
 *
 */
__pyx_t_9 = __pyx_v_trow_max;
__pyx_t_7 = __pyx_t_9;
for (__pyx_t_8 = __pyx_v_trow; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_row = __pyx_t_8;
/* "skbio/stats/ordination/_cutils.pyx":119
 *
 * for row in range(trow, trow_max, 1):
 * gr_mean = global_mean - row_means[row] # <<<<<<<<<<<<<<
 *
 * for col in range(tcol, tcol_max, 1):
 */
/* Hoisted per-row term: gr_mean = global_mean - row_means[row]. */
__pyx_t_10 = __pyx_v_row;
__pyx_v_gr_mean = (__pyx_v_global_mean - (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_row_means.data) + __pyx_t_10)) ))));
/* "skbio/stats/ordination/_cutils.pyx":121
 * gr_mean = global_mean - row_means[row]
 *
 * for col in range(tcol, tcol_max, 1): # <<<<<<<<<<<<<<
 * # Note: do not use +=, so it is not flagged as a global reduction
 * centered[row,col] = centered[row,col] + (gr_mean - row_means[col])
 */
__pyx_t_11 = __pyx_v_tcol_max;
__pyx_t_12 = __pyx_t_11;
for (__pyx_t_13 = __pyx_v_tcol; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
__pyx_v_col = __pyx_t_13;
/* "skbio/stats/ordination/_cutils.pyx":123
 * for col in range(tcol, tcol_max, 1):
 * # Note: do not use +=, so it is not flagged as a global reduction
 * centered[row,col] = centered[row,col] + (gr_mean - row_means[col]) # <<<<<<<<<<<<<<
 *
 * @cython.boundscheck(False)
 */
__pyx_t_10 = __pyx_v_row;
__pyx_t_14 = __pyx_v_col;
__pyx_t_15 = __pyx_v_col;
__pyx_t_16 = __pyx_v_row;
__pyx_t_17 = __pyx_v_col;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_centered.data + __pyx_t_16 * __pyx_v_centered.strides[0]) )) + __pyx_t_17)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_centered.data + __pyx_t_10 * __pyx_v_centered.strides[0]) )) + __pyx_t_14)) ))) + (__pyx_v_gr_mean - (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_row_means.data) + __pyx_t_15)) )))));
}
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "skbio/stats/ordination/_cutils.pyx":112
 *
 * # use a tiled pattern to maximize locality of row_means
 * for trow in prange(0, n_samples, 24, nogil=True): # <<<<<<<<<<<<<<
 * trow_max = min(trow+24, n_samples)
 *
 */
/*finally:*/ {
/*normal exit:*/{
/* Re-acquire the GIL after the nogil parallel region. */
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "skbio/stats/ordination/_cutils.pyx":81
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def f_matrix_inplace_cy(TReal[::1] row_means, TReal global_mean, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
 * """
 * Compute F matrix from E matrix inplace.
 */
/* function exit code */
/* Returns None on success; NULL with an exception set on assertion failure. */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.f_matrix_inplace_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_row_means, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_centered, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "skbio/stats/ordination/_cutils.pyx":127
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def center_distance_matrix_cy(TReal[:, ::1] mat, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
 * """
 * Centers a distance matrix.
 */
/* Python wrapper */
/* NOTE(review): Cython-GENERATED entry point for the fused function
 * center_distance_matrix_cy. This wrapper only unpacks the raw
 * (signatures, args, kwargs, defaults) tuple for the fused-cpdef
 * dispatcher below, which then selects the float/double specialization.
 * Do not hand-edit; regenerate from the .pyx. */
static PyObject *__pyx_pw_5skbio_5stats_10ordination_7_cutils_5center_distance_matrix_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy[] = "\n    Centers a distance matrix.\n\n    Note: If the used distance was euclidean, pairwise distances\n    needn't be computed from the data table Y because F_matrix =\n    Y.dot(Y.T) (if Y has been centered).\n    But since we're expecting distance_matrix to be non-euclidian,\n    we do the following computation as per\n    Numerical Ecology (Legendre & Legendre 1998).\n\n    Parameters\n    ----------\n    mat : 2D array_like\n        Distance matrix.\n    centered : 2D array_like\n        Output centered matrix. Must be pre-allocated and same shape as mat.\n        Can point to mat (i.e. in-place)\n    ";
static PyMethodDef __pyx_mdef_5skbio_5stats_10ordination_7_cutils_5center_distance_matrix_cy = {"center_distance_matrix_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5skbio_5stats_10ordination_7_cutils_5center_distance_matrix_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy};
static PyObject *__pyx_pw_5skbio_5stats_10ordination_7_cutils_5center_distance_matrix_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_signatures = 0;
PyObject *__pyx_v_args = 0;
PyObject *__pyx_v_kwargs = 0;
CYTHON_UNUSED PyObject *__pyx_v_defaults = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0};
PyObject* values[4] = {0,0,0,0};
/* Slow path: keyword dict present — gather positionals, then fill the
 * remaining slots from kwargs; all four arguments are required. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_args)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 1); __PYX_ERR(0, 127, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); __PYX_ERR(0, 127, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); __PYX_ERR(0, 127, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) __PYX_ERR(0, 127, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly four positional arguments, no kwargs. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_signatures = values[0];
__pyx_v_args = values[1];
__pyx_v_kwargs = values[2];
__pyx_v_defaults = values[3];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 127, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED fused-signature dispatcher for
 * center_distance_matrix_cy. It inspects the first argument ('mat', taken
 * positionally or by keyword) — via numpy dtype.itemsize/kind when available,
 * otherwise by attempting float/double memoryview coercion — resolves the one
 * fused slot to "float" or "double", matches it against the signatures dict,
 * and returns the single matching specialized function. Do not hand-edit;
 * regenerate from the .pyx. */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) {
PyObject *__pyx_v_dest_sig = NULL;
Py_ssize_t __pyx_v_i;
PyTypeObject *__pyx_v_ndarray = 0;
__Pyx_memviewslice __pyx_v_memslice;
Py_ssize_t __pyx_v_itemsize;
CYTHON_UNUSED int __pyx_v_dtype_signed;
char __pyx_v_kind;
PyObject *__pyx_v_arg = NULL;
PyObject *__pyx_v_dtype = NULL;
PyObject *__pyx_v_arg_base = NULL;
PyObject *__pyx_v_candidates = NULL;
PyObject *__pyx_v_sig = NULL;
int __pyx_v_match_found;
PyObject *__pyx_v_src_sig = NULL;
PyObject *__pyx_v_dst_type = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
long __pyx_t_7;
__Pyx_memviewslice __pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("center_distance_matrix_cy", 0);
__Pyx_INCREF(__pyx_v_kwargs);
/* dest_sig = [None]: one fused type slot (TReal) to resolve. */
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyList_SET_ITEM(__pyx_t_1, 0, Py_None);
__pyx_v_dest_sig = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* Normalize an empty kwargs dict to None. */
__pyx_t_3 = (__pyx_v_kwargs != Py_None);
__pyx_t_4 = (__pyx_t_3 != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_kwargs); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
__pyx_t_3 = ((!__pyx_t_4) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
__Pyx_INCREF(Py_None);
__Pyx_DECREF_SET(__pyx_v_kwargs, Py_None);
}
/* numpy.ndarray type if numpy is importable, else None. */
__pyx_t_1 = ((PyObject *)__Pyx_ImportNumPyArrayTypeIfAvailable()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_ndarray = ((PyTypeObject*)__pyx_t_1);
__pyx_t_1 = 0;
__pyx_v_itemsize = -1L;
/* Pick the dispatch argument: args[0], else kwargs['mat'], else raise. */
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 127, __pyx_L1_error)
__pyx_t_2 = ((0 < __pyx_t_5) != 0);
if (__pyx_t_2) {
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 0);
__Pyx_INCREF(__pyx_t_1);
__pyx_v_arg = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6;
}
__pyx_t_3 = (__pyx_v_kwargs != Py_None);
__pyx_t_4 = (__pyx_t_3 != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L7_bool_binop_done;
}
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_4 = (__Pyx_PyDict_ContainsTF(__pyx_n_s_mat, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
__pyx_t_3 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L7_bool_binop_done:;
if (__pyx_t_2) {
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_mat); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_arg = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6;
}
/*else*/ {
/* No usable argument: raise "Expected at least %d argument%s, got %d". */
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 127, __pyx_L1_error)
__pyx_t_1 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_int_2);
__Pyx_INCREF(__pyx_n_s_s);
__Pyx_GIVEREF(__pyx_n_s_s);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_n_s_s);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_argument_s_g, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_L6:;
/* Single-pass "loop" (exited via __pyx_L10_break) that resolves the fused
 * slot: first by dtype inspection, then by memoryview coercion fallback. */
while (1) {
__pyx_t_2 = (__pyx_v_ndarray != ((PyTypeObject*)Py_None));
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* arg is an ndarray (or a memoryview over one): read its dtype. */
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_dtype = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L12;
}
__pyx_t_2 = __pyx_memoryview_check(__pyx_v_arg);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_arg_base = __pyx_t_6;
__pyx_t_6 = 0;
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_dtype = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L13;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L13:;
goto __pyx_L12;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L12:;
__pyx_v_itemsize = -1L;
__pyx_t_2 = (__pyx_v_dtype != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* dtype.kind == 'f' with matching itemsize and ndim == 2 selects the
 * specialization directly without a buffer-acquisition attempt. */
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_itemsize = __pyx_t_5;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_Ord(__pyx_t_6); if (unlikely(__pyx_t_7 == ((long)(long)(Py_UCS4)-1))) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_kind = __pyx_t_7;
__pyx_v_dtype_signed = (__pyx_v_kind == 'i');
switch (__pyx_v_kind) {
case 'i':
case 'u':
break;
case 'f':
__pyx_t_2 = (((sizeof(float)) == __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L16_bool_binop_done;
}
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 2) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L16_bool_binop_done:;
if (__pyx_t_3) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
goto __pyx_L10_break;
}
__pyx_t_2 = (((sizeof(double)) == __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 2) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
if (__pyx_t_3) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
goto __pyx_L10_break;
}
break;
case 'c':
break;
case 'O':
break;
default: break;
}
}
/* Fallback: try acquiring a 2-D C-contiguous float, then double,
 * memoryview on arg; the first coercion that succeeds decides. */
__pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L22_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_itemsize == (sizeof(float))) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L22_bool_binop_done:;
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(__pyx_v_arg, 0);
__pyx_v_memslice = __pyx_t_8;
__pyx_t_3 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_3) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
goto __pyx_L10_break;
}
/*else*/ {
PyErr_Clear();
}
}
__pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L26_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_itemsize == (sizeof(double))) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L26_bool_binop_done:;
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_arg, 0);
__pyx_v_memslice = __pyx_t_8;
__pyx_t_3 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_3) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
goto __pyx_L10_break;
}
/*else*/ {
PyErr_Clear();
}
}
/* Unresolvable: leave the slot as None (yields "no matching signature"). */
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
goto __pyx_L10_break;
}
__pyx_L10_break:;
/* Match dest_sig against each key of the signatures dict; each key is
 * split on "|" into per-slot type names. */
__pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_candidates = ((PyObject*)__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_9), (&__pyx_t_10)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__pyx_t_6 = __pyx_t_1;
__pyx_t_1 = 0;
while (1) {
__pyx_t_11 = __Pyx_dict_iter_next(__pyx_t_6, __pyx_t_9, &__pyx_t_5, &__pyx_t_1, NULL, NULL, __pyx_t_10);
if (unlikely(__pyx_t_11 == 0)) break;
if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_v_match_found = 0;
/* src_sig = sig.strip('()').split('|') */
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_14 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
__pyx_t_14 = PyMethod_GET_SELF(__pyx_t_13);
if (likely(__pyx_t_14)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
__Pyx_INCREF(__pyx_t_14);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_13, function);
}
}
__pyx_t_12 = (__pyx_t_14) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_14, __pyx_kp_s_) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s_);
__Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_n_s_split); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
__pyx_t_12 = PyMethod_GET_SELF(__pyx_t_13);
if (likely(__pyx_t_12)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
__Pyx_INCREF(__pyx_t_12);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_13, function);
}
}
__pyx_t_1 = (__pyx_t_12) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_12, __pyx_kp_s__2) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s__2);
__Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_XDECREF_SET(__pyx_v_src_sig, __pyx_t_1);
__pyx_t_1 = 0;
/* A signature matches when every resolved slot in dest_sig equals the
 * corresponding entry of src_sig (None slots are skipped). */
__pyx_t_15 = PyList_GET_SIZE(__pyx_v_dest_sig); if (unlikely(__pyx_t_15 == ((Py_ssize_t)-1))) __PYX_ERR(0, 127, __pyx_L1_error)
__pyx_t_16 = __pyx_t_15;
for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
__pyx_v_i = __pyx_t_17;
__pyx_t_1 = PyList_GET_ITEM(__pyx_v_dest_sig, __pyx_v_i);
__Pyx_INCREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_3 = (__pyx_v_dst_type != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_src_sig, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_13 = PyObject_RichCompare(__pyx_t_1, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
if (__pyx_t_2) {
__pyx_v_match_found = 1;
goto __pyx_L34;
}
/*else*/ {
__pyx_v_match_found = 0;
goto __pyx_L32_break;
}
__pyx_L34:;
}
}
__pyx_L32_break:;
__pyx_t_2 = (__pyx_v_match_found != 0);
if (__pyx_t_2) {
__pyx_t_18 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_18 == ((int)-1))) __PYX_ERR(0, 127, __pyx_L1_error)
}
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* Exactly one candidate required: zero or multiple matches → TypeError
 * (messages held in __pyx_tuple__3 / __pyx_tuple__4). */
__pyx_t_2 = (PyList_GET_SIZE(__pyx_v_candidates) != 0);
__pyx_t_3 = ((!__pyx_t_2) != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_9 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 127, __pyx_L1_error)
__pyx_t_3 = ((__pyx_t_9 > 1) != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 127, __pyx_L1_error)
}
/*else*/ {
/* Return the selected specialized function object. */
__Pyx_XDECREF(__pyx_r);
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 127, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_12);
__Pyx_XDECREF(__pyx_t_13);
__Pyx_XDECREF(__pyx_t_14);
__Pyx_AddTraceback("skbio.stats.ordination._cutils.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_dest_sig);
__Pyx_XDECREF(__pyx_v_ndarray);
__Pyx_XDECREF(__pyx_v_arg);
__Pyx_XDECREF(__pyx_v_dtype);
__Pyx_XDECREF(__pyx_v_arg_base);
__Pyx_XDECREF(__pyx_v_candidates);
__Pyx_XDECREF(__pyx_v_sig);
__Pyx_XDECREF(__pyx_v_src_sig);
__Pyx_XDECREF(__pyx_v_dst_type);
__Pyx_XDECREF(__pyx_v_kwargs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_19center_distance_matrix_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_0__pyx_mdef_5skbio_5stats_10ordination_7_cutils_19center_distance_matrix_cy = {"__pyx_fuse_0center_distance_matrix_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_19center_distance_matrix_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy};
static PyObject *__pyx_fuse_0__pyx_pw_5skbio_5stats_10ordination_7_cutils_19center_distance_matrix_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_mat = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_centered = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("center_distance_matrix_cy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_mat,&__pyx_n_s_centered,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mat)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_centered)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("center_distance_matrix_cy", 1, 2, 2, 1); __PYX_ERR(0, 127, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "center_distance_matrix_cy") < 0)) __PYX_ERR(0, 127, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_mat = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_mat.memview)) __PYX_ERR(0, 127, __pyx_L3_error)
__pyx_v_centered = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_centered.memview)) __PYX_ERR(0, 127, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("center_distance_matrix_cy", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 127, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.center_distance_matrix_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_18center_distance_matrix_cy(__pyx_self, __pyx_v_mat, __pyx_v_centered);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Cython-generated implementation of the TReal==float specialization of
 * center_distance_matrix_cy (_cutils.pyx line 127).  Validates that mat
 * and centered are square with matching sample counts, allocates a
 * float32 row-means buffer via np.zeros, then calls e_matrix_means_cy
 * followed by f_matrix_inplace_cy to center the matrix; returns None.
 * Generated code — do not edit by hand; regenerate from _cutils.pyx.
 */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_18center_distance_matrix_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered) {
Py_ssize_t __pyx_v_n_samples;
Py_ssize_t __pyx_v_d2;
Py_ssize_t __pyx_v_d3;
Py_ssize_t __pyx_v_d4;
float __pyx_v_global_mean;
PyObject *__pyx_v_dtype_real = NULL;
PyObject *__pyx_v_row_means_np = NULL;
__Pyx_memviewslice __pyx_v_row_means = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
float __pyx_t_10;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_0center_distance_matrix_cy", 0);
/* "skbio/stats/ordination/_cutils.pyx":146
 * Can point to mat (i.e. in-place)
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 */
__pyx_v_n_samples = (__pyx_v_mat.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":147
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]
 */
__pyx_v_d2 = (__pyx_v_mat.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":148
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d4 = centered.shape[1]
 *
 */
__pyx_v_d3 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":149
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]             # <<<<<<<<<<<<<<
 *
 * assert n_samples == d2
 */
__pyx_v_d4 = (__pyx_v_centered.shape[1]);
/* Shape assertions from the .pyx: both matrices must be n_samples-square.
 * Compiled out entirely under CYTHON_WITHOUT_ASSERTIONS, and skipped at
 * runtime when Python runs with -O (Py_OptimizeFlag set). */
/* "skbio/stats/ordination/_cutils.pyx":151
 * cdef Py_ssize_t d4 = centered.shape[1]
 *
 * assert n_samples == d2             # <<<<<<<<<<<<<<
 * assert n_samples == d3
 * assert n_samples == d4
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d2) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 151, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":152
 *
 * assert n_samples == d2
 * assert n_samples == d3             # <<<<<<<<<<<<<<
 * assert n_samples == d4
 *
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d3) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 152, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":153
 * assert n_samples == d2
 * assert n_samples == d3
 * assert n_samples == d4             # <<<<<<<<<<<<<<
 *
 * cdef TReal global_mean
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d4) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 153, __pyx_L1_error)
}
}
#endif
/* TReal is float in this specialization, so the fused-type branch
 * resolved at compile time to dtype_real = np.float32. */
/* "skbio/stats/ordination/_cutils.pyx":158
 *
 * if TReal is float:
 * dtype_real = np.float32             # <<<<<<<<<<<<<<
 * else:
 * dtype_real = np.float64
 */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float32); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_dtype_real = __pyx_t_2;
__pyx_t_2 = 0;
/* Build np.zeros((n_samples,), dtype=dtype_real): positional tuple holds
 * the shape 1-tuple; dtype is passed via a presized keyword dict. */
/* "skbio/stats/ordination/_cutils.pyx":162
 * dtype_real = np.float64
 *
 * row_means_np = np.zeros((n_samples,), dtype=dtype_real)             # <<<<<<<<<<<<<<
 * cdef TReal[::1] row_means = row_means_np
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_n_samples); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_v_dtype_real) < 0) __PYX_ERR(0, 162, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_row_means_np = __pyx_t_4;
__pyx_t_4 = 0;
/* "skbio/stats/ordination/_cutils.pyx":163
 *
 * row_means_np = np.zeros((n_samples,), dtype=dtype_real)
 * cdef TReal[::1] row_means = row_means_np             # <<<<<<<<<<<<<<
 *
 * global_mean = e_matrix_means_cy(mat, centered, row_means)
 */
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(__pyx_v_row_means_np, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 163, __pyx_L1_error)
__pyx_v_row_means = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* Call e_matrix_means_cy(mat, centered, row_means): the three slices are
 * re-wrapped as memoryview objects, then the call is dispatched through
 * the fast PyFunction/PyCFunction paths with a generic-tuple fallback. */
/* "skbio/stats/ordination/_cutils.pyx":165
 * cdef TReal[::1] row_means = row_means_np
 *
 * global_mean = e_matrix_means_cy(mat, centered, row_means)             # <<<<<<<<<<<<<<
 * f_matrix_inplace_cy(row_means, global_mean, centered)
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_e_matrix_means_cy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_mat, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_centered, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_row_means, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_7, __pyx_t_2, __pyx_t_1, __pyx_t_6};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_7, __pyx_t_2, __pyx_t_1, __pyx_t_6};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 2+__pyx_t_8, __pyx_t_6);
__pyx_t_2 = 0;
__pyx_t_1 = 0;
__pyx_t_6 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_9, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_10 = __pyx_PyFloat_AsFloat(__pyx_t_4); if (unlikely((__pyx_t_10 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_global_mean = __pyx_t_10;
/* Call f_matrix_inplace_cy(row_means, global_mean, centered); result is
 * discarded — the function works in place on `centered`. */
/* "skbio/stats/ordination/_cutils.pyx":166
 *
 * global_mean = e_matrix_means_cy(mat, centered, row_means)
 * f_matrix_inplace_cy(row_means, global_mean, centered)             # <<<<<<<<<<<<<<
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_f_matrix_inplace_cy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = __pyx_memoryview_fromslice(__pyx_v_row_means, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_6 = PyFloat_FromDouble(__pyx_v_global_mean); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_centered, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_t_9, __pyx_t_6, __pyx_t_1};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_t_9, __pyx_t_6, __pyx_t_1};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_8, __pyx_t_1);
__pyx_t_9 = 0;
__pyx_t_6 = 0;
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "skbio/stats/ordination/_cutils.pyx":127
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def center_distance_matrix_cy(TReal[:, ::1] mat, TReal[:, ::1] centered):             # <<<<<<<<<<<<<<
 * """
 * Centers a distance matrix.
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: release every temporary that may still hold a reference,
 * record the traceback, and return NULL. */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("skbio.stats.ordination._cutils.center_distance_matrix_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Common cleanup: drop locals and the argument memoryview slices. */
__Pyx_XDECREF(__pyx_v_dtype_real);
__Pyx_XDECREF(__pyx_v_row_means_np);
__PYX_XDEC_MEMVIEW(&__pyx_v_row_means, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mat, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_centered, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_21center_distance_matrix_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_1__pyx_mdef_5skbio_5stats_10ordination_7_cutils_21center_distance_matrix_cy = {"__pyx_fuse_1center_distance_matrix_cy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_21center_distance_matrix_cy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_5stats_10ordination_7_cutils_4center_distance_matrix_cy};
/*
 * Cython-generated Python wrapper for the TReal==double specialization of
 * center_distance_matrix_cy(mat, centered).  Identical structure to the
 * float wrapper above, but converts the arguments to 2-D C-contiguous
 * double memoryviews and delegates to the __pyx_pf_..._20 implementation.
 * Generated code — do not edit by hand; regenerate from _cutils.pyx.
 */
static PyObject *__pyx_fuse_1__pyx_pw_5skbio_5stats_10ordination_7_cutils_21center_distance_matrix_cy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_mat = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_centered = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("center_distance_matrix_cy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_mat,&__pyx_n_s_centered,0};
PyObject* values[2] = {0,0};
/* Keyword path: collect positionals, then fill remaining slots
 * ('mat', 'centered') from the keyword dict. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fall-through switch: fill each still-missing required argument. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mat)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_centered)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("center_distance_matrix_cy", 1, 2, 2, 1); __PYX_ERR(0, 127, __pyx_L3_error)
}
}
/* Reject any unexpected keyword arguments. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "center_distance_matrix_cy") < 0)) __PYX_ERR(0, 127, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly two positional arguments, no keywords. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
/* Acquire writable 2-D C-contiguous double memoryview slices. */
__pyx_v_mat = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_mat.memview)) __PYX_ERR(0, 127, __pyx_L3_error)
__pyx_v_centered = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_centered.memview)) __PYX_ERR(0, 127, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("center_distance_matrix_cy", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 127, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("skbio.stats.ordination._cutils.center_distance_matrix_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5skbio_5stats_10ordination_7_cutils_20center_distance_matrix_cy(__pyx_self, __pyx_v_mat, __pyx_v_centered);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Cython-generated implementation of the TReal==double specialization of
 * center_distance_matrix_cy (_cutils.pyx line 127).  Mirrors the float
 * specialization above: validates square shapes, allocates a float64
 * row-means buffer via np.zeros, then calls e_matrix_means_cy followed
 * by f_matrix_inplace_cy to center the matrix in place; returns None.
 * Generated code — do not edit by hand; regenerate from _cutils.pyx.
 */
static PyObject *__pyx_pf_5skbio_5stats_10ordination_7_cutils_20center_distance_matrix_cy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat, __Pyx_memviewslice __pyx_v_centered) {
Py_ssize_t __pyx_v_n_samples;
Py_ssize_t __pyx_v_d2;
Py_ssize_t __pyx_v_d3;
Py_ssize_t __pyx_v_d4;
double __pyx_v_global_mean;
PyObject *__pyx_v_dtype_real = NULL;
PyObject *__pyx_v_row_means_np = NULL;
__Pyx_memviewslice __pyx_v_row_means = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
double __pyx_t_10;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_1center_distance_matrix_cy", 0);
/* "skbio/stats/ordination/_cutils.pyx":146
 * Can point to mat (i.e. in-place)
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 */
__pyx_v_n_samples = (__pyx_v_mat.shape[0]);
/* "skbio/stats/ordination/_cutils.pyx":147
 * """
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]
 */
__pyx_v_d2 = (__pyx_v_mat.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":148
 * cdef Py_ssize_t n_samples = mat.shape[0]
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t d4 = centered.shape[1]
 *
 */
__pyx_v_d3 = (__pyx_v_centered.shape[1]);
/* "skbio/stats/ordination/_cutils.pyx":149
 * cdef Py_ssize_t d2 = mat.shape[1]
 * cdef Py_ssize_t d3 = centered.shape[1]
 * cdef Py_ssize_t d4 = centered.shape[1]             # <<<<<<<<<<<<<<
 *
 * assert n_samples == d2
 */
__pyx_v_d4 = (__pyx_v_centered.shape[1]);
/* Shape assertions from the .pyx: both matrices must be n_samples-square.
 * Compiled out under CYTHON_WITHOUT_ASSERTIONS and skipped under -O. */
/* "skbio/stats/ordination/_cutils.pyx":151
 * cdef Py_ssize_t d4 = centered.shape[1]
 *
 * assert n_samples == d2             # <<<<<<<<<<<<<<
 * assert n_samples == d3
 * assert n_samples == d4
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d2) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 151, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":152
 *
 * assert n_samples == d2
 * assert n_samples == d3             # <<<<<<<<<<<<<<
 * assert n_samples == d4
 *
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d3) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 152, __pyx_L1_error)
}
}
#endif
/* "skbio/stats/ordination/_cutils.pyx":153
 * assert n_samples == d2
 * assert n_samples == d3
 * assert n_samples == d4             # <<<<<<<<<<<<<<
 *
 * cdef TReal global_mean
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_n_samples == __pyx_v_d4) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 153, __pyx_L1_error)
}
}
#endif
/* TReal is double in this specialization, so the fused-type branch
 * resolved at compile time to dtype_real = np.float64. */
/* "skbio/stats/ordination/_cutils.pyx":160
 * dtype_real = np.float32
 * else:
 * dtype_real = np.float64             # <<<<<<<<<<<<<<
 *
 * row_means_np = np.zeros((n_samples,), dtype=dtype_real)
 */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_dtype_real = __pyx_t_2;
__pyx_t_2 = 0;
/* Build np.zeros((n_samples,), dtype=dtype_real): positional tuple holds
 * the shape 1-tuple; dtype is passed via a presized keyword dict. */
/* "skbio/stats/ordination/_cutils.pyx":162
 * dtype_real = np.float64
 *
 * row_means_np = np.zeros((n_samples,), dtype=dtype_real)             # <<<<<<<<<<<<<<
 * cdef TReal[::1] row_means = row_means_np
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_n_samples); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_v_dtype_real) < 0) __PYX_ERR(0, 162, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_row_means_np = __pyx_t_4;
__pyx_t_4 = 0;
/* "skbio/stats/ordination/_cutils.pyx":163
 *
 * row_means_np = np.zeros((n_samples,), dtype=dtype_real)
 * cdef TReal[::1] row_means = row_means_np             # <<<<<<<<<<<<<<
 *
 * global_mean = e_matrix_means_cy(mat, centered, row_means)
 */
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_row_means_np, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 163, __pyx_L1_error)
__pyx_v_row_means = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* Call e_matrix_means_cy(mat, centered, row_means): slices are wrapped
 * back into memoryview objects, then dispatched through the fast
 * PyFunction/PyCFunction call paths with a generic-tuple fallback. */
/* "skbio/stats/ordination/_cutils.pyx":165
 * cdef TReal[::1] row_means = row_means_np
 *
 * global_mean = e_matrix_means_cy(mat, centered, row_means)             # <<<<<<<<<<<<<<
 * f_matrix_inplace_cy(row_means, global_mean, centered)
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_e_matrix_means_cy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_mat, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_centered, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_row_means, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_7, __pyx_t_2, __pyx_t_1, __pyx_t_6};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_7, __pyx_t_2, __pyx_t_1, __pyx_t_6};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 2+__pyx_t_8, __pyx_t_6);
__pyx_t_2 = 0;
__pyx_t_1 = 0;
__pyx_t_6 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_9, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_10 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_10 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_global_mean = __pyx_t_10;
/* Call f_matrix_inplace_cy(row_means, global_mean, centered); result is
 * discarded — the function works in place on `centered`. */
/* "skbio/stats/ordination/_cutils.pyx":166
 *
 * global_mean = e_matrix_means_cy(mat, centered, row_means)
 * f_matrix_inplace_cy(row_means, global_mean, centered)             # <<<<<<<<<<<<<<
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_f_matrix_inplace_cy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = __pyx_memoryview_fromslice(__pyx_v_row_means, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_6 = PyFloat_FromDouble(__pyx_v_global_mean); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_centered, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_t_9, __pyx_t_6, __pyx_t_1};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_t_9, __pyx_t_6, __pyx_t_1};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_8, __pyx_t_1);
__pyx_t_9 = 0;
__pyx_t_6 = 0;
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "skbio/stats/ordination/_cutils.pyx":127
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * def center_distance_matrix_cy(TReal[:, ::1] mat, TReal[:, ::1] centered):             # <<<<<<<<<<<<<<
 * """
 * Centers a distance matrix.
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: release any still-held temporaries, record traceback,
 * and return NULL. */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("skbio.stats.ordination._cutils.center_distance_matrix_cy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Common cleanup: drop locals and the argument memoryview slices. */
__Pyx_XDECREF(__pyx_v_dtype_real);
__Pyx_XDECREF(__pyx_v_row_means_np);
__PYX_XDEC_MEMVIEW(&__pyx_v_row_means, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mat, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_centered, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
/* Python-level wrapper for View.MemoryView.array.__cinit__ (Cython-generated).
 * Unpacks positional and keyword arguments (shape, itemsize, format,
 * mode="c", allocate_buffer=True), applies defaults, type-checks `shape`
 * (must be a tuple) and `format` (must not be None), then delegates to the
 * implementation function below. Returns 0 on success, -1 with a Python
 * exception set on failure. NOTE(review): generated code — do not edit by
 * hand; regenerate from the .pyx instead. */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
/* Default for `mode` is the interned string "c"; slot 4 (allocate_buffer)
 * defaults later to true when left unset. */
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Deliberate switch fallthrough: copy however many positional args were
 * passed into `values`, highest index first. */
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill remaining slots from keywords; shape/itemsize/format are required,
 * mode/allocate_buffer are optional. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
/* Any keyword left over at this point is unexpected — raise TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
/* Fast path: no keywords, 3 to 5 positional arguments. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
 *
 * cdef int idx
 */
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
/* Enforce the `tuple shape` and `format not None` declarations from the
 * Cython signature before calling the implementation. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of View.MemoryView.array.__cinit__ (Cython-generated).
 * Validates shape (non-empty tuple of positive dims) and itemsize (> 0),
 * normalizes `format` to a bytes object it keeps alive in self->_format,
 * allocates self->_shape and self->_strides as one PyObject_Malloc block,
 * fills the strides for C or Fortran order, and — when allocate_buffer is
 * true — mallocs self->data, seeding it with Py_None references when the
 * format is b'O' (object dtype). Returns 0 on success, -1 on error.
 * NOTE(review): generated code — do not edit by hand. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
 * cdef PyObject **p
 *
 * self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
 * self.itemsize = itemsize
 *
 */
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
 *
 * self.ndim = <int> len(shape)
 * self.itemsize = itemsize # <<<<<<<<<<<<<<
 *
 * if not self.ndim:
 */
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
 * self.itemsize = itemsize
 *
 * if not self.ndim: # <<<<<<<<<<<<<<
 * raise ValueError("Empty shape tuple for cython.array")
 *
 */
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
 *
 * if not self.ndim:
 * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
 *
 * if itemsize <= 0:
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
 * self.itemsize = itemsize
 *
 * if not self.ndim: # <<<<<<<<<<<<<<
 * raise ValueError("Empty shape tuple for cython.array")
 *
 */
}
/* "View.MemoryView":135
 * raise ValueError("Empty shape tuple for cython.array")
 *
 * if itemsize <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 */
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
 *
 * if itemsize <= 0:
 * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
 *
 * if not isinstance(format, bytes):
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
 * raise ValueError("Empty shape tuple for cython.array")
 *
 * if itemsize <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 */
}
/* "View.MemoryView":138
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 */
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
 *
 * if not isinstance(format, bytes):
 * format = format.encode('ASCII') # <<<<<<<<<<<<<<
 * self._format = format # keep a reference to the byte string
 * self.format = self._format
 */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 */
}
/* "View.MemoryView":140
 * if not isinstance(format, bytes):
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
 * self.format = self._format
 *
 */
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 * self.format = self._format # <<<<<<<<<<<<<<
 *
 *
 */
/* self->format borrows the internal buffer of the bytes object held by
 * self->_format, so _format must stay alive as long as format is used. */
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
 *
 *
 * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
 * self._strides = self._shape + self.ndim
 *
 */
/* Single allocation holds both shape (first ndim slots) and strides
 * (second ndim slots). */
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
 *
 * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
 * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
 *
 * if not self._shape:
 */
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
 * self._strides = self._shape + self.ndim
 *
 * if not self._shape: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate shape and strides.")
 *
 */
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
 *
 * if not self._shape:
 * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
 * self._strides = self._shape + self.ndim
 *
 * if not self._shape: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate shape and strides.")
 *
 */
}
/* "View.MemoryView":151
 *
 *
 * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
 * for idx, dim in enumerate(shape):
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
 * self._shape[idx] = dim
 *
 */
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
}
/* "View.MemoryView":154
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim # <<<<<<<<<<<<<<
 *
 * cdef char order
 */
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
 *
 *
 * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
 *
 * cdef char order
 * if mode == 'fortran': # <<<<<<<<<<<<<<
 * order = b'F'
 * self.mode = u'fortran'
 */
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
 * cdef char order
 * if mode == 'fortran':
 * order = b'F' # <<<<<<<<<<<<<<
 * self.mode = u'fortran'
 * elif mode == 'c':
 */
__pyx_v_order = 'F';
/* "View.MemoryView":159
 * if mode == 'fortran':
 * order = b'F'
 * self.mode = u'fortran' # <<<<<<<<<<<<<<
 * elif mode == 'c':
 * order = b'C'
 */
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
 *
 * cdef char order
 * if mode == 'fortran': # <<<<<<<<<<<<<<
 * order = b'F'
 * self.mode = u'fortran'
 */
goto __pyx_L10;
}
/* "View.MemoryView":160
 * order = b'F'
 * self.mode = u'fortran'
 * elif mode == 'c': # <<<<<<<<<<<<<<
 * order = b'C'
 * self.mode = u'c'
 */
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
 * self.mode = u'fortran'
 * elif mode == 'c':
 * order = b'C' # <<<<<<<<<<<<<<
 * self.mode = u'c'
 * else:
 */
__pyx_v_order = 'C';
/* "View.MemoryView":162
 * elif mode == 'c':
 * order = b'C'
 * self.mode = u'c' # <<<<<<<<<<<<<<
 * else:
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 */
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
 * order = b'F'
 * self.mode = u'fortran'
 * elif mode == 'c': # <<<<<<<<<<<<<<
 * order = b'C'
 * self.mode = u'c'
 */
goto __pyx_L10;
}
/* "View.MemoryView":164
 * self.mode = u'c'
 * else:
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
 *
 * self.len = fill_contig_strides_array(self._shape, self._strides,
 */
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 *
 * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
 * itemsize, self.ndim, order)
 *
 */
/* Computes contiguous strides for the chosen order and the total byte
 * length of the buffer. */
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
 * itemsize, self.ndim, order)
 *
 * self.free_data = allocate_buffer # <<<<<<<<<<<<<<
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer:
 */
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
 *
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
 * if allocate_buffer:
 *
 */
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer: # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
 *
 *
 * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
 * if not self.data:
 * raise MemoryError("unable to allocate array data.")
 */
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
 *
 * self.data = <char *>malloc(self.len)
 * if not self.data: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate array data.")
 *
 */
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
 * self.data = <char *>malloc(self.len)
 * if not self.data:
 * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
 *
 * if self.dtype_is_object:
 */
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
 *
 * self.data = <char *>malloc(self.len)
 * if not self.data: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate array data.")
 *
 */
}
/* "View.MemoryView":178
 * raise MemoryError("unable to allocate array data.")
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 */
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
 *
 * if self.dtype_is_object:
 * p = <PyObject **> self.data # <<<<<<<<<<<<<<
 * for i in range(self.len / itemsize):
 * p[i] = Py_None
 */
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
 * if self.dtype_is_object:
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
 * p[i] = Py_None
 * Py_INCREF(Py_None)
 */
/* Guard the Python floor-division semantics: explicit checks for division
 * by zero and for the INT_MIN/-1 style overflow case. */
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 * p[i] = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
 * for i in range(self.len / itemsize):
 * p[i] = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
 * raise MemoryError("unable to allocate array data.")
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 */
}
/* "View.MemoryView":171
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer: # <<<<<<<<<<<<<<
 *
 *
 */
}
/* "View.MemoryView":122
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
/* Buffer-protocol entry point (tp_as_buffer->bf_getbuffer) for the
 * View.MemoryView.array type: thin wrapper that casts `self` and delegates
 * to the __getbuffer__ implementation below. Generated code. */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getbuffer__ (PEP 3118 buffer protocol,
 * Cython-generated). Verifies that the caller's `flags` request is
 * compatible with the array's contiguity mode ('c' or 'fortran'), then
 * fills the Py_buffer fields (buf, len, ndim, shape, strides, itemsize,
 * format, obj) from the array's members. Returns 0 on success, -1 with a
 * BufferError/ValueError set on failure. Generated code — do not edit. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* info->obj starts as Py_None; it is replaced by `self` on the success
 * path and cleared again on the error path at the bottom. */
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1 # <<<<<<<<<<<<<<
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1
 * if self.mode == u"c": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 */
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
 * cdef int bufmode = -1
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1
 * if self.mode == u"c": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 */
goto __pyx_L3;
}
/* "View.MemoryView":189
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 */
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 */
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 */
}
__pyx_L3:;
/* "View.MemoryView":191
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode): # <<<<<<<<<<<<<<
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 */
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
 * info.buf = self.data
 * info.len = self.len
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode): # <<<<<<<<<<<<<<
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 */
}
/* "View.MemoryView":193
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data # <<<<<<<<<<<<<<
 * info.len = self.len
 * info.ndim = self.ndim
 */
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 * info.len = self.len # <<<<<<<<<<<<<<
 * info.ndim = self.ndim
 * info.shape = self._shape
 */
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
 * info.buf = self.data
 * info.len = self.len
 * info.ndim = self.ndim # <<<<<<<<<<<<<<
 * info.shape = self._shape
 * info.strides = self._strides
 */
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
 * info.len = self.len
 * info.ndim = self.ndim
 * info.shape = self._shape # <<<<<<<<<<<<<<
 * info.strides = self._strides
 * info.suboffsets = NULL
 */
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
 * info.ndim = self.ndim
 * info.shape = self._shape
 * info.strides = self._strides # <<<<<<<<<<<<<<
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize
 */
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
 * info.shape = self._shape
 * info.strides = self._strides
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 * info.itemsize = self.itemsize
 * info.readonly = 0
 */
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
 * info.strides = self._strides
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize # <<<<<<<<<<<<<<
 * info.readonly = 0
 *
 */
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize
 * info.readonly = 0 # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
 * info.readonly = 0
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.format
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
 *
 * if flags & PyBUF_FORMAT:
 * info.format = self.format # <<<<<<<<<<<<<<
 * else:
 * info.format = NULL
 */
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
 * info.readonly = 0
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.format
 * else:
 */
goto __pyx_L5;
}
/* "View.MemoryView":205
 * info.format = self.format
 * else:
 * info.format = NULL # <<<<<<<<<<<<<<
 *
 * info.obj = self
 */
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
 * info.format = NULL
 *
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 */
/* Success: info->obj owns a reference to `self` so the exporting array
 * stays alive for the lifetime of the buffer view. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * cdef int bufmode = -1
 * if self.mode == u"c":
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
    __Pyx_RefNannyDeclarations
    struct __pyx_array_obj *arr = (struct __pyx_array_obj *)__pyx_v_self;
    __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
    /* tp_dealloc slot: delegate all teardown to the generated implementation. */
    __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(arr);
    __Pyx_RefNannyFinishContext();
}
/* Deallocator body for View.MemoryView.array.
* Release order (visible below): a user-supplied callback_free_data, if set,
* takes precedence and receives the raw data pointer; otherwise, when
* free_data is set, object elements are decref'd first (dtype_is_object)
* and the buffer is free()'d. The _shape allocation is always released
* with PyObject_Free at the end. */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
/* Custom deallocator owns the buffer; nothing else is freed for data. */
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
/* Drop one reference per stored object before freeing their storage. */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
/* Property getter slot for array.memview: cast the generic PyObject* to the
* concrete array struct and delegate to the generated implementation. */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of `array.memview.__get__`: returns self.get_memview()
* via the vtable. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
/* cdef method dispatched through the type's vtable, not attribute lookup. */
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* cdef array.get_memview(): builds a memoryview over this array with
* PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE by calling the
* memoryview type with (self, flags, dtype_is_object).
* Returns a new reference, or 0 with an exception set. */
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
/* Box the C int flags and the bint dtype_is_object into Python objects. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* Assemble the 3-element argument tuple (self, flags, dtype_is_object). */
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
    __Pyx_RefNannyDeclarations
    Py_ssize_t length;
    __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
    /* sq_length slot: forward to the implementation, which reads _shape[0]. */
    length = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
    __Pyx_RefNannyFinishContext();
    return length;
}
/* Implementation of `len(array)`: returns the extent of the first
* dimension, _shape[0]. Cannot fail. */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
/* tp_getattro fallback wrapper for array.__getattr__(attr): casts self and
* delegates to the generated implementation. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getattr__: unknown attributes are resolved on
* self.memview, i.e. `return getattr(self.memview, attr)`.
* Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
/* Fetch the memview property first, then look up attr on it. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
/* mp_subscript slot wrapper for array[item]: casts self and delegates to the
* generated implementation. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getitem__: `return self.memview[item]` — the
* array itself stores no indexing logic, it defers to its memoryview.
* Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
/* mp_ass_subscript slot wrapper for array[item] = value: casts self and
* delegates. Returns 0 on success, -1 on error. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__setitem__: `self.memview[item] = value` — item
* assignment is delegated to the memoryview. Returns 0 on success,
* -1 with an exception set on failure. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
/* Method wrapper for array.__reduce_cython__ (pickling hook): casts self and
* delegates to the implementation, which always raises TypeError. */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* array.__reduce_cython__: the array type is not picklable (non-trivial
* __cinit__), so this unconditionally raises TypeError and returns NULL. */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* __pyx_tuple__10 holds the pre-built TypeError message arguments. */
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
/* Method wrapper for array.__setstate_cython__ (unpickling hook): casts self
* and delegates to the implementation, which always raises TypeError. */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* array.__setstate_cython__: mirror of __reduce_cython__ — unpickling is
* unsupported, so this unconditionally raises TypeError and returns NULL.
* The state argument is ignored. */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* C-level factory (cname __pyx_array_new) for View.MemoryView.array.
* Two paths, selected on buf:
*   buf == NULL  -> array(shape, itemsize, format, mode.decode('ASCII')):
*                   the constructor allocates its own buffer;
*   buf != NULL  -> same call plus allocate_buffer=False, then the caller's
*                   buffer pointer is stored directly into result.data
*                   (ownership of buf stays with the caller).
* Returns a new reference, or 0 with an exception set. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
/* Box the C arguments: itemsize -> int, format -> bytes, mode -> str. */
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
/* Keyword dict carrying allocate_buffer=False so the ctor skips malloc. */
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
/* Wrap the caller's buffer; the array does not own it on this path. */
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
/* tp_init wrapper for Enum.__init__(self, name): unpacks exactly one
* positional or keyword argument ('name'), raising TypeError on arity or
* keyword mismatch, then delegates. Returns 0 on success, -1 on error. */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
/* Slow path: keyword arguments present — merge positionals and kwargs. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
/* Any leftover keyword is unexpected — let the helper raise. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of Enum.__init__: stores the name object on self,
* replacing (and decref'ing) any previous value. Always returns 0. */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
/* Incref new value before decref'ing the old one (safe replacement). */
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
/* tp_repr slot wrapper for Enum.__repr__: casts self and delegates. */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of Enum.__repr__: returns self.name as a new reference.
* Cannot fail. */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
/* Method wrapper for Enum.__reduce_cython__ (pickling support): casts self
* and delegates to the generated implementation. */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Pickle support for View.MemoryView.Enum (generated from the "(tree
 * fragment)" stub).  Builds `state = (self.name,)`, appends the instance
 * __dict__ when present, then returns either
 *   (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state)   -- setstate
 * or
 *   (__pyx_unpickle_Enum, (type(self), 0xb068931, state))         -- direct
 * 0xb068931 (== 184977713) is the checksum of the class layout used to
 * validate compatibility at unpickle time.
 * Error paths jump to __pyx_L1_error; temporaries __pyx_t_* own exactly one
 * reference each until handed over via PyTuple_SET_ITEM. */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
/* PyTuple_SET_ITEM steals the reference just INCREF'd above */
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
/* tuple += tuple concatenates, producing a fresh state tuple */
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
/* __pyx_int_184977713 == 0xb068931, the layout checksum */
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
/* error path: release any live temporaries, record traceback, return NULL */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
/* Python-callable wrapper for Enum.__setstate_cython__: downcasts `self` and
 * forwards the pickle state tuple to the __pyx_pf_ implementation. */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Unpickle counterpart of __reduce_cython__: type-checks the incoming state
 * (must be a tuple or None) and applies it via __pyx_unpickle_Enum__set_state.
 * Returns None on success, NULL with an exception set on failure. */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
/* reject anything that is not exactly a tuple (or None) before delegating */
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* "View.MemoryView":298 -- align_pointer(memory, alignment)
 *
 * Round `memory` up to the next multiple of `alignment`; a pointer that is
 * already aligned is returned unchanged.  Pure arithmetic, no Python API,
 * callable without the GIL (generated from a `nogil` cdef function). */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
  Py_intptr_t addr = ((Py_intptr_t)__pyx_v_memory);
  size_t remainder;

  /* cdivision(True): plain C remainder, no Python semantics */
  remainder = (addr % __pyx_v_alignment);

  /* Bump forward by the shortfall only when misaligned. */
  if (remainder > 0) {
    addr = (addr + (__pyx_v_alignment - remainder));
  }

  return ((void *)addr);
}
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
/* Python-callable wrapper for memoryview.__cinit__(obj, flags,
 * dtype_is_object=False).  Unpacks positional and keyword arguments (the
 * switch-with-fallthrough is Cython's standard argument-parsing state
 * machine), applies the default for dtype_is_object, then delegates to the
 * __pyx_pf_ implementation.  Returns 0 on success, -1 with an exception set
 * on argument errors. */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* first collect whatever came positionally ... */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* ... then fill the remaining slots from keywords, in declaration order */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
/* any keyword left over is either unknown or a duplicate */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
/* fast path: purely positional call */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
/* default: dtype_is_object=False */
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__cinit__ (View.MemoryView:345).
 * Stores obj/flags, acquires the underlying Py_buffer via __Pyx_GetBuffer
 * when appropriate, grabs a thread lock from a small preallocated pool
 * (falling back to PyThread_allocate_lock), and derives dtype_is_object from
 * the buffer format when PyBUF_FORMAT was requested.  Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
/* swap the stored object reference: INCREF new, DECREF old */
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
/* normalize a NULL view.obj to an owned Py_None so __dealloc__ can
 * distinguish this case and release the reference symmetrically */
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
/* 8 == THREAD_LOCKS_PREALLOCATED, inlined by the code generator */
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
/* pool exhausted (or slot was NULL): allocate a dedicated lock */
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
/* format string "O" (object pointer) means items are PyObject* */
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
/* the atomic acquisition counter must live at a naturally-aligned address */
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
/* tp_dealloc-level wrapper for memoryview.__dealloc__: downcasts `self` and
 * delegates to the __pyx_pf_ implementation below. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of memoryview.__dealloc__ (View.MemoryView:372).
 * Releases the Py_buffer (or the sentinel Py_None reference __cinit__
 * installed when view.obj was NULL), then returns this view's thread lock:
 * a pool-owned lock is swapped back into the active region of the pool
 * array, a privately-allocated one is freed with PyThread_free_lock. */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
/* undo the Py_None sentinel installed in __cinit__ (View.MemoryView:351) */
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
/* scan only the in-use prefix of the pool for this lock */
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
/* swap this slot with the last in-use slot to keep the prefix dense */
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
/* for-else: loop completed without break, so the lock is not pool-owned */
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* get_item_pointer (View.MemoryView:393): walk `index` (one Python index per
 * dimension) and advance a char* from view.buf via pybuffer_index for each
 * dimension.  Returns the element pointer, or NULL with an exception set
 * ("except NULL" contract).  The for(;;) implements Python's enumerate():
 * a fast path indexes list/tuple items directly, the slow path drives the
 * generic iterator protocol and treats StopIteration as end-of-sequence. */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
/* fast path: direct item access; __pyx_t_4 stays NULL as the marker */
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
/* generic path: build an iterator and cache its tp_iternext slot */
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
/* StopIteration means normal exhaustion; anything else propagates */
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
/* enumerate() counter: dim gets the current value, then it advances */
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
/* Python-level wrapper for memoryview.__getitem__.  Casts the generic
 * PyObject* receiver to the concrete memoryview struct and delegates to the
 * generated implementation below.  The RefNanny calls are no-ops unless
 * Cython was built with refcount debugging enabled. */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__getitem__ (View.MemoryView lines 403-414).
 * Behavior: `index is Ellipsis` returns self (new reference); otherwise
 * _unellipsify() normalizes the index into (have_slices, indices).  If any
 * slices are present a new sliced memoryview is built via memview_slice();
 * otherwise a single element is read through get_item_pointer() and boxed
 * with convert_item_to_object().  On error, all owned temporaries are
 * released at __pyx_L1_error and NULL is returned with an exception set. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
 *
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis: # <<<<<<<<<<<<<<
 * return self
 *
 */
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis:
 * return self # <<<<<<<<<<<<<<
 *
 * have_slices, indices = _unellipsify(index, self.view.ndim)
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
 *
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis: # <<<<<<<<<<<<<<
 * return self
 *
 */
}
/* "View.MemoryView":407
 * return self
 *
 * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * cdef char *itemp
 */
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* Unpack the 2-tuple returned by _unellipsify(); raises if it is None or
 * does not contain exactly two items. */
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
 *
 * cdef char *itemp
 * if have_slices: # <<<<<<<<<<<<<<
 * return memview_slice(self, indices)
 * else:
 */
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
 * cdef char *itemp
 * if have_slices:
 * return memview_slice(self, indices) # <<<<<<<<<<<<<<
 * else:
 * itemp = self.get_item_pointer(indices)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
 *
 * cdef char *itemp
 * if have_slices: # <<<<<<<<<<<<<<
 * return memview_slice(self, indices)
 * else:
 */
}
/* "View.MemoryView":413
 * return memview_slice(self, indices)
 * else:
 * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
 * return self.convert_item_to_object(itemp)
 *
 */
/*else*/ {
/* Virtual-table dispatch: cdef methods are called through __pyx_vtab. */
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
 * else:
 * itemp = self.get_item_pointer(indices)
 * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
 *
 * def __setitem__(memoryview self, object index, object value):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
 *
 *
 * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
 * if index is Ellipsis:
 * return self
 */
/* function exit code */
__pyx_L1_error:;
/* Error path: release any live temporaries, record a traceback frame, and
 * return NULL with the exception set. */
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
/* Python-level wrapper for memoryview.__setitem__.  Casts the receiver to
 * the concrete memoryview struct and delegates to the generated
 * implementation; returns 0 on success, -1 with an exception set on error
 * (standard sq_ass_item / mp_ass_subscript convention). */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__setitem__ (View.MemoryView lines 416-429).
 * Raises TypeError for read-only buffers, then normalizes the index with
 * _unellipsify().  Slice assignment dispatches on whether `value` itself can
 * be viewed as a memoryview (is_slice): slice-to-slice copy via
 * setitem_slice_assignment(), scalar broadcast via
 * setitem_slice_assign_scalar(); plain integer indexing goes through
 * setitem_indexed().  `index` is INCREF'd up front because _unellipsify()
 * rebinds it (DECREF_SET below).  Returns 0 on success, -1 on error. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
 *
 * def __setitem__(memoryview self, object index, object value):
 * if self.view.readonly: # <<<<<<<<<<<<<<
 * raise TypeError("Cannot assign to read-only memoryview")
 *
 */
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
 * def __setitem__(memoryview self, object index, object value):
 * if self.view.readonly:
 * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
 *
 * have_slices, index = _unellipsify(index, self.view.ndim)
 */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
 *
 * def __setitem__(memoryview self, object index, object value):
 * if self.view.readonly: # <<<<<<<<<<<<<<
 * raise TypeError("Cannot assign to read-only memoryview")
 *
 */
}
/* "View.MemoryView":420
 * raise TypeError("Cannot assign to read-only memoryview")
 *
 * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * if have_slices:
 */
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* Unpack the (have_slices, index) 2-tuple; error out on None or wrong size. */
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
 *
 * if have_slices:
 * obj = self.is_slice(value) # <<<<<<<<<<<<<<
 * if obj:
 * self.setitem_slice_assignment(self[index], obj)
 */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
 * obj = self.is_slice(value)
 * if obj:
 * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_slice_assign_scalar(self[index], value)
 */
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
goto __pyx_L5;
}
/* "View.MemoryView":427
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_indexed(index, value)
 */
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
/* Type-check: self[index] must be a memoryview (or None) before the cast. */
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
goto __pyx_L4;
}
/* "View.MemoryView":429
 * self.setitem_slice_assign_scalar(self[index], value)
 * else:
 * self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
 *
 * cdef is_slice(self, obj):
 */
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
 * return self.convert_item_to_object(itemp)
 *
 * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
 * if self.view.readonly:
 * raise TypeError("Cannot assign to read-only memoryview")
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: release temporaries, record traceback, return -1. */
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* cdef memoryview.is_slice(obj) (View.MemoryView lines 431-439).
 * If `obj` is already a memoryview it is returned as-is; otherwise this
 * attempts to wrap it in a new memoryview (read-only, any-contiguous buffer
 * request) and returns that.  A TypeError from the wrapping attempt is
 * swallowed and None is returned (obj is not buffer-like); any other
 * exception propagates.  Returns a new reference, or NULL on error.
 * The ExceptionSave/Reset pairs implement the try/except block's
 * exception-state bookkeeping. */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
 *
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
 * try:
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
/* Save the current exception state so it can be restored when leaving the
 * try block (t_3/t_4/t_5 hold the saved exc type/value/traceback). */
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
 * try:
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object) # <<<<<<<<<<<<<<
 * except TypeError:
 * return None
 */
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
/* Build the (obj, flags, dtype_is_object) argument tuple and call the
 * memoryview constructor. */
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 * except TypeError: # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
 * self.dtype_is_object)
 * except TypeError:
 * return None # <<<<<<<<<<<<<<
 *
 * return obj
 */
/* TypeError means obj does not support the buffer protocol: return None. */
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
/* Unhandled exception: restore the saved exception state and propagate. */
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
 *
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
 * try:
 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 */
}
/* "View.MemoryView":439
 * return None
 *
 * return obj # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assignment(self, dst, src):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
 * self.setitem_indexed(index, value)
 *
 * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
 * if not isinstance(obj, memoryview):
 * try:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* cdef memoryview.setitem_slice_assignment(dst, src) (View.MemoryView
 * lines 441-447).  Copies the contents of the `src` memoryview into the
 * `dst` memoryview via memoryview_copy_contents(), which handles
 * broadcasting across differing ndim.  Both arguments are type-checked as
 * memoryviews before their C slice structs are extracted.  Returns None on
 * success, NULL with an exception set on failure. */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 */
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
/* Read src.ndim / dst.ndim via Python attribute access and convert to int. */
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
 * return obj
 *
 * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice dst_slice
 * cdef __Pyx_memviewslice src_slice
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* cdef memoryview.setitem_slice_assign_scalar(dst, value) (View.MemoryView
 * lines 449-479).  Broadcasts a single scalar `value` into every element of
 * the destination slice.  The scalar is first packed into a temporary item
 * buffer: a 128-int stack array when one item fits, otherwise a
 * PyMem_Malloc'd buffer.  For object dtype the PyObject* is stored directly
 * (borrowed — slice_assign_scalar manages refcounts); otherwise
 * assign_item_from_object() serializes the value.  A try/finally construct
 * guarantees PyMem_Free(tmp) on both the normal and the exception path
 * (free(NULL) is a no-op when the stack array was used).  Returns None on
 * success, NULL on error. */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 * cdef int array[128]
 * cdef void *tmp = NULL # <<<<<<<<<<<<<<
 * cdef void *item
 *
 */
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
 * cdef __Pyx_memviewslice *dst_slice
 * cdef __Pyx_memviewslice tmp_slice
 * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 */
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
 * dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 */
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
 * if tmp == NULL:
 * raise MemoryError
 */
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 * item = tmp
 */
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 * raise MemoryError # <<<<<<<<<<<<<<
 * item = tmp
 * else:
 */
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 * item = tmp
 */
}
/* "View.MemoryView":462
 * if tmp == NULL:
 * raise MemoryError
 * item = tmp # <<<<<<<<<<<<<<
 * else:
 * item = <void *> array
 */
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
 * dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 */
goto __pyx_L3;
}
/* "View.MemoryView":464
 * item = tmp
 * else:
 * item = <void *> array # <<<<<<<<<<<<<<
 *
 * try:
 */
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
 * item = <void *> array
 *
 * try: # <<<<<<<<<<<<<<
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value
 */
/*try:*/ {
/* "View.MemoryView":467
 *
 * try:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 */
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
 * try:
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
 * else:
 * self.assign_item_from_object(<char *> item, value)
 */
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
 *
 * try:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 */
goto __pyx_L8;
}
/* "View.MemoryView":470
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
 *
 *
 */
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
 *
 *
 * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
 *
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 * item, self.dtype_is_object)
 */
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
 *
 *
 * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
}
/* "View.MemoryView":476
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
 * item, self.dtype_is_object)
 * finally:
 */
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
 * item, self.dtype_is_object)
 * finally:
 * PyMem_Free(tmp) # <<<<<<<<<<<<<<
 *
 * cdef setitem_indexed(self, index, value):
 */
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
/* finally-block exception machinery: stash the in-flight exception
 * (t_7..t_12) and the error location, run the finally body, then restore
 * everything and re-raise via __pyx_L1_error. */
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
 * cdef int array[128]
 * cdef void *tmp = NULL
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* Cython-generated implementation of View.MemoryView.memoryview.setitem_indexed.
 * Resolves the element address for `index` through the memoryview vtable's
 * get_item_pointer, then delegates the actual store to assign_item_from_object.
 * Returns a new reference to None on success, or NULL with a Python exception
 * set on failure.  NOTE: generated code — do not edit by hand. */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
/* get_item_pointer signals failure by returning NULL with an exception set. */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
/* The returned object is only a success marker here; drop it immediately. */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: release the temporary and record a traceback frame. */
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* Cython-generated implementation of
 * View.MemoryView.memoryview.convert_item_to_object.
 * Converts the raw bytes of one element (at `itemp`, view.itemsize bytes)
 * into a Python object using the stdlib `struct` module and the buffer's
 * format string.  A struct.error is translated into ValueError.  If the
 * format string has length 1, the single unpacked value is returned instead
 * of the one-element tuple.  NOTE: generated code — do not edit by hand. */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
/* Runtime import of the stdlib struct module (done on every call). */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
/* Copy the element's raw bytes into a new bytes object. */
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
/* try: save the current exception state so it can be restored after the
 * except/else handling below (standard Cython try/except expansion). */
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
/* Three-way call dispatch generated by Cython: fast paths for plain
 * Python functions and C functions, falling back to a tuple-based call. */
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/* else-clause of the try: unwrap single-value formats to a scalar. */
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
/* The try-body raised: clear temporaries before matching the exception. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
/* except struct.error: match against struct.error looked up at runtime. */
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
/* Re-raise as ValueError with a user-facing message. */
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
/* Unhandled/propagating exception: restore the saved exception state. */
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
/* Normal return from inside the try/else: restore saved state first. */
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* Cython-generated implementation of
 * View.MemoryView.memoryview.assign_item_from_object.
 * Packs a Python `value` into raw element bytes via struct.pack using the
 * buffer's format string (tuples are splatted as multiple pack arguments),
 * then copies the packed bytes into the element at `itemp` byte by byte.
 * Returns a new reference to None on success, NULL on failure.
 * NOTE: generated code — do not edit by hand. */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
/* Runtime import of the stdlib struct module (done on every call). */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
/* Tuple case: build (format,) + tuple(value) and call struct.pack(*args). */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* `cdef bytes bytesvalue` implies a runtime bytes type check. */
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/* Scalar case: struct.pack(format, value) via Cython's call dispatch. */
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
/* enumerate over the packed bytes, copying each into the element. */
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
/* Cython-generated Python wrapper for memoryview.__getbuffer__ (exported
 * under @cname('getbuffer')).  Casts `self` to the concrete memoryview
 * struct and forwards to the implementation function below. */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated implementation of memoryview.__getbuffer__ (buffer
 * protocol tp_getbuffer slot body).  Fills `info` from the wrapped
 * Py_buffer (`self->view`) honoring the request `flags`: refuses a
 * writable request on a read-only view, and NULLs out shape/strides/
 * suboffsets/format fields the caller did not ask for.  Returns 0 on
 * success, -1 with an exception set on failure; `info->obj` ownership
 * follows PEP 3118 (set to self on success, cleared on failure).
 * NOTE: generated code — do not edit by hand. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* A NULL view argument is a caller bug per the buffer protocol. */
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Temporarily park None in info->obj; replaced with self on success. */
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
/* Expose shape only when the consumer requested it. */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
/* Likewise for strides. */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
/* Likewise for suboffsets (only meaningful for indirect buffers). */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
/* Likewise for the struct-style format string. */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
/* Unconditional fields copied straight from the wrapped view. */
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
/* Replace the placeholder None with an owned reference to self. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* On failure, drop any reference parked in info->obj. */
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
/* On success, clear the placeholder only if it was never replaced. */
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
/* Cython-generated Python wrapper for the memoryview.T property getter;
 * forwards to the typed implementation below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated getter for memoryview.T: copies the memoryview, then
 * transposes the copy's slice in place and returns it.  Returns NULL with
 * an exception set on failure.  NOTE: generated code — do not edit by hand. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
/* memoryview_copy may legally return None; the TypeTest enforces the
 * _memoryviewslice declared type otherwise. */
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
/* transpose_memslice returns 0 on error (per this generated check). */
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
/* Cython-generated Python wrapper for the memoryview.base property getter;
 * forwards to the typed implementation below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated getter for memoryview.base: returns a new reference to
 * the underlying exporting object (self.obj).  Cannot fail. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
/* Cython-generated Python wrapper for the memoryview.shape property getter;
 * forwards to the typed implementation below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated getter for memoryview.shape: builds a Python tuple of
 * the first view.ndim entries of view.shape.  Returns NULL with an
 * exception set on allocation failure. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
/* List comprehension over the C shape array, then converted to a tuple. */
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":567
 *
 * @property
 * def strides(self): # <<<<<<<<<<<<<<
 * if self.view.strides == NULL:
 *
 */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated getter for memoryview.strides: thin wrapper that casts
 * self and delegates to the impl function. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: raises ValueError when the underlying Py_buffer exposes no strides,
 * otherwise returns a tuple of the first ndim entries of view.strides. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
 * @property
 * def strides(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
 * if self.view.strides == NULL:
 *
 * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
 *
 * return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
 * @property
 * def strides(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
}
/* "View.MemoryView":572
 * raise ValueError("Buffer view does not expose strides")
 *
 * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* Pointer-walk over view.strides[0 .. ndim-1]. */
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
 *
 * @property
 * def strides(self): # <<<<<<<<<<<<<<
 * if self.view.strides == NULL:
 *
 */
/* function exit code */
/* Error path: drop temporaries, record the traceback, return NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
 *
 * @property
 * def suboffsets(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim
 */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated getter for memoryview.suboffsets: thin wrapper that casts
 * self and delegates to the impl function. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: when the Py_buffer has no suboffsets array, returns (-1,) * ndim
 * (the buffer-protocol convention for "no suboffset"); otherwise returns a
 * tuple of the first ndim entries of view.suboffsets. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
 * @property
 * def suboffsets(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
 * def suboffsets(self):
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* __pyx_tuple__16 is the pre-built constant tuple (-1,). */
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__16, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
 * @property
 * def suboffsets(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
}
/* "View.MemoryView":579
 * return (-1,) * self.view.ndim
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* Pointer-walk over view.suboffsets[0 .. ndim-1]. */
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
 *
 * @property
 * def suboffsets(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim
 */
/* function exit code */
/* Error path: drop temporaries, record the traceback, return NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":582
 *
 * @property
 * def ndim(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated getter for memoryview.ndim: thin wrapper that casts
 * self and delegates to the impl function. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: returns view.ndim (the Py_buffer dimension count) boxed as a
 * Python int. Only the int conversion can fail. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
 * @property
 * def ndim(self):
 * return self.view.ndim # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
 *
 * @property
 * def ndim(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
 *
 * @property
 * def itemsize(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated getter for memoryview.itemsize: thin wrapper that casts
 * self and delegates to the impl function. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: returns view.itemsize (bytes per element in the Py_buffer) boxed
 * as a Python int. Only the int conversion can fail. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
 * @property
 * def itemsize(self):
 * return self.view.itemsize # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
 *
 * @property
 * def itemsize(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
 *
 * @property
 * def nbytes(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated getter for memoryview.nbytes: thin wrapper that casts
 * self and delegates to the impl function. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: computes self.size * self.view.itemsize at the Python object
 * level. Note `size` is fetched via attribute lookup (goes through the
 * `size` property below), not read from a C field directly. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
 * @property
 * def nbytes(self):
 * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
 *
 * @property
 * def nbytes(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
 *
 * @property
 * def size(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated getter for memoryview.size: thin wrapper that casts
 * self and delegates to the impl function. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: lazily computes and caches the total element count (the product of
 * all shape entries) in self->_size, then returns the cached value. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
 * @property
 * def size(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
/* Cache miss only when _size is still None. */
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
 * def size(self):
 * if self._size is None:
 * result = 1 # <<<<<<<<<<<<<<
 *
 * for length in self.view.shape[:self.view.ndim]:
 */
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
 * result = 1
 *
 * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
 * result *= length
 *
 */
/* Pointer-walk over view.shape[0 .. ndim-1]; a 0-d view keeps result == 1. */
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
 *
 * for length in self.view.shape[:self.view.ndim]:
 * result *= length # <<<<<<<<<<<<<<
 *
 * self._size = result
 */
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
 * result *= length
 *
 * self._size = result # <<<<<<<<<<<<<<
 *
 * return self._size
 */
/* Store the product into the _size cache slot, releasing the old value. */
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
 * @property
 * def size(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
}
/* "View.MemoryView":603
 * self._size = result
 *
 * return self._size # <<<<<<<<<<<<<<
 *
 * def __len__(self):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
 *
 * @property
 * def size(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":605
 * return self._size
 *
 * def __len__(self): # <<<<<<<<<<<<<<
 * if self.view.ndim >= 1:
 * return self.view.shape[0]
 */
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __len__ slot: thin wrapper that casts self and
 * delegates to the impl function. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: len() of the first dimension (view.shape[0]) when ndim >= 1,
 * otherwise 0 for a 0-d view. Pure C integers; cannot raise. */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
 * def __len__(self):
 * if self.view.ndim >= 1:
 * return self.view.shape[0] # <<<<<<<<<<<<<<
 *
 * return 0
 */
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
}
/* "View.MemoryView":609
 * return self.view.shape[0]
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 * def __repr__(self):
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
 * return self._size
 *
 * def __len__(self): # <<<<<<<<<<<<<<
 * if self.view.ndim >= 1:
 * return self.view.shape[0]
 */
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __repr__ slot: thin wrapper that casts self and
 * delegates to the impl function. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: formats "<MemoryView of %r at 0x%x>" from self.base.__class__.__name__
 * and id(self), via attribute lookups and %-style string formatting. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
__Pyx_XDECREF(__pyx_r);
/* self.base -> .__class__ -> .__name__, each via attribute lookup. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self)) # <<<<<<<<<<<<<<
 *
 * def __str__(self):
 */
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
/* Pack (name, id) into a 2-tuple; SET_ITEM steals the references. */
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __str__ slot: thin wrapper that casts self and
 * delegates to the impl function. */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: formats "<MemoryView of %r object>" from self.base.__class__.__name__
 * (no id), using a 1-tuple and %-style string formatting. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
 *
 * def __str__(self):
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
/* self.base -> .__class__ -> .__name__, each via attribute lookup. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":619
 *
 *
 * def is_c_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* Cython-generated method memoryview.is_c_contig(): no-arg method wrapper
 * that casts self and delegates to the impl function. */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: obtains a slice view of self (tmp is stack scratch the helper may
 * fill) and returns a Python bool for C-order ('C') contiguity. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 */
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def is_f_contig(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
 *
 *
 * def is_c_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":625
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 * def is_f_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* Cython-generated method memoryview.is_f_contig(): no-arg method wrapper
 * that casts self and delegates to the impl function. */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: identical to is_c_contig except the contiguity order argument is
 * 'F' (Fortran order). */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 */
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def copy(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 * def is_f_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":631
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* Cython-generated method memoryview.copy(): no-arg method wrapper that
 * casts self and delegates to the impl function. */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Impl: makes a C-contiguous copy of the view. Clears the F-contiguous
 * flag bit, requests a new contiguous buffer in 'c' order via
 * copy_new_contig, and wraps the result back into a memoryview object. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
 * def copy(self):
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &mslice)
 */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 *
 * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 * self.view.itemsize,
 */
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
 *
 * slice_copy(self, &mslice)
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_C_CONTIGUOUS,
 */
/* Helper signals failure via a raised exception, hence the PyErr_Occurred check. */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
 *
 * def copy_fortran(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
 * return memoryview_copy_from_slice(self, &mslice)
 *
 * def copy_fortran(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 */
/*
 * Cython-generated implementation of memoryview.copy_fortran(): mirror of
 * copy() above, but produces a Fortran-contiguous (column-major) copy —
 * C-contiguity flag is cleared and PyBUF_F_CONTIGUOUS is forced.
 * NOTE(review): machine-generated code — do not hand-edit.
 */
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice __pyx_v_src;
  __Pyx_memviewslice __pyx_v_dst;
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("copy_fortran", 0);
  /* "View.MemoryView":645
 * def copy_fortran(self):
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &src)
 */
  /* drop the C-contiguity request from the buffer flags */
  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
  /* "View.MemoryView":647
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 *
 * slice_copy(self, &src) # <<<<<<<<<<<<<<
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim,
 * self.view.itemsize,
 */
  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
  /* "View.MemoryView":648
 *
 * slice_copy(self, &src)
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_F_CONTIGUOUS,
 */
  /* make a freshly allocated Fortran-contiguous copy of the slice data */
  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
  __pyx_v_dst = __pyx_t_1;
  /* "View.MemoryView":653
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":643
 * return memoryview_copy_from_slice(self, &mslice)
 *
 * def copy_fortran(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "(tree fragment)":1
 * def __reduce_cython__(self): # <<<<<<<<<<<<<<
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 */
/*
 * Cython-generated __reduce_cython__ for memoryview: pickling is not
 * supported (the type has a non-trivial __cinit__), so this always raises
 * TypeError.  NOTE(review): machine-generated code — do not hand-edit.
 */
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
  /* unconditionally raise; __pyx_tuple__17 holds the pre-built message args */
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 2, __pyx_L1_error)
  /* "(tree fragment)":1
 * def __reduce_cython__(self): # <<<<<<<<<<<<<<
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "(tree fragment)":3
 * def __reduce_cython__(self):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
/*
 * Cython-generated __setstate_cython__ for memoryview: counterpart of
 * __reduce_cython__ above — unpickling is unsupported, always raises
 * TypeError.  NOTE(review): machine-generated code — do not hand-edit.
 */
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
  /* "(tree fragment)":4
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
 */
  /* unconditionally raise; __pyx_tuple__18 holds the pre-built message args */
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 4, __pyx_L1_error)
  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":657
 *
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 */
/*
 * Cython-generated C entry point for creating a memoryview: builds the
 * 3-tuple (o, flags, dtype_is_object), calls the memoryview type with it,
 * then attaches the C-level typeinfo pointer on the new object.
 * NOTE(review): machine-generated code — do not hand-edit.
 */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
  struct __pyx_memoryview_obj *__pyx_v_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
  /* "View.MemoryView":658
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
 * result.typeinfo = typeinfo
 * return result
 */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(__pyx_v_o);
  __Pyx_GIVEREF(__pyx_v_o);
  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
  /* t_1/t_2 ownership was transferred into the tuple via PyTuple_SET_ITEM */
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
  __pyx_t_2 = 0;
  /* "View.MemoryView":659
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo # <<<<<<<<<<<<<<
 * return result
 *
 */
  __pyx_v_result->typeinfo = __pyx_v_typeinfo;
  /* "View.MemoryView":660
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_check')
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;
  /* "View.MemoryView":657
 *
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/*
 * Cython-generated memoryview_check(o): the C form of
 * isinstance(o, memoryview), implemented as a direct type check against
 * __pyx_memoryview_type.  Behavior identical to the generated original;
 * only the dead temporary and goto/label scaffolding were folded away.
 */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("memoryview_check", 0);
  /* return isinstance(o, memoryview) */
  __pyx_r = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":666
 * return isinstance(o, memoryview)
 *
 * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
 * """
 * Replace all ellipses with full slices and fill incomplete indices with
 */
/*
 * Cython-generated _unellipsify(index, ndim): normalizes an indexing
 * expression.  A non-tuple index is wrapped into a 1-tuple; the first
 * Ellipsis expands into (ndim - len(tup) + 1) full slices, later Ellipses
 * become single slice(None); non-slice, non-integer items raise TypeError;
 * the result is right-padded with slice(None) up to ndim entries.
 * Returns the 2-tuple (have_slices or nslices, tuple(result)).
 * NOTE(review): machine-generated code — do not hand-edit; the refcount
 * and iterator protocol below depend on exact statement order.
 */
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
  PyObject *__pyx_v_tup = NULL;
  PyObject *__pyx_v_result = NULL;
  int __pyx_v_have_slices;
  int __pyx_v_seen_ellipsis;
  CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
  PyObject *__pyx_v_item = NULL;
  Py_ssize_t __pyx_v_nslices;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  Py_ssize_t __pyx_t_5;
  PyObject *(*__pyx_t_6)(PyObject *);
  PyObject *__pyx_t_7 = NULL;
  Py_ssize_t __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  PyObject *__pyx_t_11 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("_unellipsify", 0);
  /* "View.MemoryView":671
 * full slices.
 * """
 * if not isinstance(index, tuple): # <<<<<<<<<<<<<<
 * tup = (index,)
 * else:
 */
  __pyx_t_1 = PyTuple_Check(__pyx_v_index);
  __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
  if (__pyx_t_2) {
  /* "View.MemoryView":672
 * """
 * if not isinstance(index, tuple):
 * tup = (index,) # <<<<<<<<<<<<<<
 * else:
 * tup = index
 */
    __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_INCREF(__pyx_v_index);
    __Pyx_GIVEREF(__pyx_v_index);
    PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
    __pyx_v_tup = __pyx_t_3;
    __pyx_t_3 = 0;
  /* "View.MemoryView":671
 * full slices.
 * """
 * if not isinstance(index, tuple): # <<<<<<<<<<<<<<
 * tup = (index,)
 * else:
 */
    goto __pyx_L3;
  }
  /* "View.MemoryView":674
 * tup = (index,)
 * else:
 * tup = index # <<<<<<<<<<<<<<
 *
 * result = []
 */
  /*else*/ {
    __Pyx_INCREF(__pyx_v_index);
    __pyx_v_tup = __pyx_v_index;
  }
  __pyx_L3:;
  /* "View.MemoryView":676
 * tup = index
 *
 * result = [] # <<<<<<<<<<<<<<
 * have_slices = False
 * seen_ellipsis = False
 */
  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_result = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  /* "View.MemoryView":677
 *
 * result = []
 * have_slices = False # <<<<<<<<<<<<<<
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 */
  __pyx_v_have_slices = 0;
  /* "View.MemoryView":678
 * result = []
 * have_slices = False
 * seen_ellipsis = False # <<<<<<<<<<<<<<
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 */
  __pyx_v_seen_ellipsis = 0;
  /* "View.MemoryView":679
 * have_slices = False
 * seen_ellipsis = False
 * for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
 * if item is Ellipsis:
 * if not seen_ellipsis:
 */
  /* enumerate(): __pyx_t_3 carries the running index as a Python int */
  __Pyx_INCREF(__pyx_int_0);
  __pyx_t_3 = __pyx_int_0;
  if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
    __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
    __pyx_t_6 = NULL;
  } else {
    __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_6)) {
      if (likely(PyList_CheckExact(__pyx_t_4))) {
        if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_7);
#endif
      } else {
        if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_7);
#endif
      }
    } else {
      __pyx_t_7 = __pyx_t_6(__pyx_t_4);
      if (unlikely(!__pyx_t_7)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
          else __PYX_ERR(1, 679, __pyx_L1_error)
        }
        break;
      }
      __Pyx_GOTREF(__pyx_t_7);
    }
    __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
    __pyx_t_7 = 0;
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
    __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_3);
    __pyx_t_3 = __pyx_t_7;
    __pyx_t_7 = 0;
  /* "View.MemoryView":680
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 * if item is Ellipsis: # <<<<<<<<<<<<<<
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 */
    __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
    __pyx_t_1 = (__pyx_t_2 != 0);
    if (__pyx_t_1) {
  /* "View.MemoryView":681
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 * if not seen_ellipsis: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True
 */
      __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
      if (__pyx_t_1) {
  /* "View.MemoryView":682
 * if item is Ellipsis:
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
 * seen_ellipsis = True
 * else:
 */
        /* first Ellipsis: expand to enough full slices to reach ndim */
        __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
        __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_7);
        { Py_ssize_t __pyx_temp;
          for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
            __Pyx_INCREF(__pyx_slice__19);
            __Pyx_GIVEREF(__pyx_slice__19);
            PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__19);
          }
        }
        __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  /* "View.MemoryView":683
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True # <<<<<<<<<<<<<<
 * else:
 * result.append(slice(None))
 */
        __pyx_v_seen_ellipsis = 1;
  /* "View.MemoryView":681
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 * if not seen_ellipsis: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True
 */
        goto __pyx_L7;
      }
  /* "View.MemoryView":685
 * seen_ellipsis = True
 * else:
 * result.append(slice(None)) # <<<<<<<<<<<<<<
 * have_slices = True
 * else:
 */
      /* subsequent Ellipsis: just a single full slice */
      /*else*/ {
        __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__19); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
      }
      __pyx_L7:;
  /* "View.MemoryView":686
 * else:
 * result.append(slice(None))
 * have_slices = True # <<<<<<<<<<<<<<
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item):
 */
      __pyx_v_have_slices = 1;
  /* "View.MemoryView":680
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 * if item is Ellipsis: # <<<<<<<<<<<<<<
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 */
      goto __pyx_L6;
    }
  /* "View.MemoryView":688
 * have_slices = True
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 */
    /*else*/ {
      __pyx_t_2 = PySlice_Check(__pyx_v_item);
      __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
      if (__pyx_t_10) {
      } else {
        __pyx_t_1 = __pyx_t_10;
        goto __pyx_L9_bool_binop_done;
      }
      __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
      __pyx_t_1 = __pyx_t_10;
      __pyx_L9_bool_binop_done:;
      if (unlikely(__pyx_t_1)) {
  /* "View.MemoryView":689
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item):
 * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
 *
 * have_slices = have_slices or isinstance(item, slice)
 */
        __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_7);
        __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_11);
        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
        __Pyx_Raise(__pyx_t_11, 0, 0, 0);
        __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
        __PYX_ERR(1, 689, __pyx_L1_error)
  /* "View.MemoryView":688
 * have_slices = True
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 */
      }
  /* "View.MemoryView":691
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
 * result.append(item)
 *
 */
      __pyx_t_10 = (__pyx_v_have_slices != 0);
      if (!__pyx_t_10) {
      } else {
        __pyx_t_1 = __pyx_t_10;
        goto __pyx_L11_bool_binop_done;
      }
      __pyx_t_10 = PySlice_Check(__pyx_v_item);
      __pyx_t_2 = (__pyx_t_10 != 0);
      __pyx_t_1 = __pyx_t_2;
      __pyx_L11_bool_binop_done:;
      __pyx_v_have_slices = __pyx_t_1;
  /* "View.MemoryView":692
 *
 * have_slices = have_slices or isinstance(item, slice)
 * result.append(item) # <<<<<<<<<<<<<<
 *
 * nslices = ndim - len(result)
 */
      __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
    }
    __pyx_L6:;
  /* "View.MemoryView":679
 * have_slices = False
 * seen_ellipsis = False
 * for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
 * if item is Ellipsis:
 * if not seen_ellipsis:
 */
  }
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* "View.MemoryView":694
 * result.append(item)
 *
 * nslices = ndim - len(result) # <<<<<<<<<<<<<<
 * if nslices:
 * result.extend([slice(None)] * nslices)
 */
  __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
  __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
  /* "View.MemoryView":695
 *
 * nslices = ndim - len(result)
 * if nslices: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * nslices)
 *
 */
  __pyx_t_1 = (__pyx_v_nslices != 0);
  if (__pyx_t_1) {
  /* "View.MemoryView":696
 * nslices = ndim - len(result)
 * if nslices:
 * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
 *
 * return have_slices or nslices, tuple(result)
 */
    /* pad the tail with full slices up to ndim entries */
    __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    { Py_ssize_t __pyx_temp;
      for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
        __Pyx_INCREF(__pyx_slice__19);
        __Pyx_GIVEREF(__pyx_slice__19);
        PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__19);
      }
    }
    __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* "View.MemoryView":695
 *
 * nslices = ndim - len(result)
 * if nslices: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * nslices)
 *
 */
  }
  /* "View.MemoryView":698
 * result.extend([slice(None)] * nslices)
 *
 * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 */
  __Pyx_XDECREF(__pyx_r);
  /* short-circuit `have_slices or nslices`: bool if have_slices, else int */
  if (!__pyx_v_have_slices) {
  } else {
    __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_3 = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L14_bool_binop_done;
  }
  __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_3 = __pyx_t_4;
  __pyx_t_4 = 0;
  __pyx_L14_bool_binop_done:;
  __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_r = ((PyObject*)__pyx_t_11);
  __pyx_t_11 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":666
 * return isinstance(o, memoryview)
 *
 * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
 * """
 * Replace all ellipses with full slices and fill incomplete indices with
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_tup);
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_XDECREF(__pyx_v_item);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":700
 * return have_slices or nslices, tuple(result)
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
 * for suboffset in suboffsets[:ndim]:
 * if suboffset >= 0:
 */
/*
 * Cython-generated assert_direct_dimensions(): scans the first `ndim`
 * suboffsets and raises ValueError if any is >= 0 (i.e. the dimension is
 * indirect/PIL-style, which this code path does not support).  Returns
 * None on success.  NOTE(review): machine-generated code — do not hand-edit.
 */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  Py_ssize_t *__pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
  /* "View.MemoryView":701
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
 * if suboffset >= 0:
 * raise ValueError("Indirect dimensions not supported")
 */
  __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
  for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
    __pyx_t_1 = __pyx_t_3;
    __pyx_v_suboffset = (__pyx_t_1[0]);
  /* "View.MemoryView":702
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 * for suboffset in suboffsets[:ndim]:
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Indirect dimensions not supported")
 *
 */
    __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
    if (unlikely(__pyx_t_4)) {
  /* "View.MemoryView":703
 * for suboffset in suboffsets[:ndim]:
 * if suboffset >= 0:
 * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
 *
 *
 */
      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_Raise(__pyx_t_5, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      __PYX_ERR(1, 703, __pyx_L1_error)
  /* "View.MemoryView":702
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 * for suboffset in suboffsets[:ndim]:
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Indirect dimensions not supported")
 *
 */
    }
  }
  /* "View.MemoryView":700
 * return have_slices or nslices, tuple(result)
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
 * for suboffset in suboffsets[:ndim]:
 * if suboffset >= 0:
 */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/*
 * Cython-generated implementation of View.MemoryView.memview_slice (the
 * original Cython source appears in the interleaved block comments below).
 *
 * Builds a new memoryview by applying a sequence of indices to an existing
 * one.  For each entry of `indices`: an integer index selects one element
 * along the current source dimension (dropping it from the result), `None`
 * inserts a new length-1 dimension, and a slice object is normalized and
 * applied via slice_memviewslice.  Returns a new memoryview object on
 * success, or NULL with a Python exception set on failure.
 *
 * NOTE(review): this file is machine-generated by Cython -- do not hand-edit;
 * regenerate from the .pyx source instead.  Code left byte-identical below;
 * only comments were added.
 */
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices):
 * cdef int new_ndim = 0, suboffset_dim = -1, dim             # <<<<<<<<<<<<<<
 * cdef bint negative_step
 * cdef __Pyx_memviewslice src, dst
 */
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
 *
 *
 * memset(&dst, 0, sizeof(dst))             # <<<<<<<<<<<<<<
 *
 * cdef _memoryviewslice memviewsliceobj
 */
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
 * cdef _memoryviewslice memviewsliceobj
 *
 * assert memview.view.ndim > 0             # <<<<<<<<<<<<<<
 *
 * if isinstance(memview, _memoryviewslice):
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
 * assert memview.view.ndim > 0
 *
 * if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
 *
 * if isinstance(memview, _memoryviewslice):
 * memviewsliceobj = memview             # <<<<<<<<<<<<<<
 * p_src = &memviewsliceobj.from_slice
 * else:
 */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
 * if isinstance(memview, _memoryviewslice):
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice             # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, &src)
 */
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
 * assert memview.view.ndim > 0
 *
 * if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice
 */
goto __pyx_L3;
}
/* "View.MemoryView":728
 * p_src = &memviewsliceobj.from_slice
 * else:
 * slice_copy(memview, &src)             # <<<<<<<<<<<<<<
 * p_src = &src
 *
 */
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
 * else:
 * slice_copy(memview, &src)
 * p_src = &src             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
 *
 *
 * dst.memview = p_src.memview             # <<<<<<<<<<<<<<
 * dst.data = p_src.data
 *
 */
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
 *
 * dst.memview = p_src.memview
 * dst.data = p_src.data             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
 *
 *
 * cdef __Pyx_memviewslice *p_dst = &dst             # <<<<<<<<<<<<<<
 * cdef int *p_suboffset_dim = &suboffset_dim
 * cdef Py_ssize_t start, stop, step
 */
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
 *
 * cdef __Pyx_memviewslice *p_dst = &dst
 * cdef int *p_suboffset_dim = &suboffset_dim             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t start, stop, step
 * cdef bint have_start, have_stop, have_step
 */
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
 * cdef bint have_start, have_stop, have_step
 *
 * for dim, index in enumerate(indices):             # <<<<<<<<<<<<<<
 * if PyIndex_Check(index):
 * slice_memviewslice(
 */
/* Iteration protocol: fast path uses PyList/PyTuple item macros, slow path
 * falls back to tp_iternext with explicit StopIteration handling. */
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
 *
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index):             # <<<<<<<<<<<<<<
 * slice_memviewslice(
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 */
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 * index, 0, 0,    # start, stop, step             # <<<<<<<<<<<<<<
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 */
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index):
 * slice_memviewslice(             # <<<<<<<<<<<<<<
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 */
/* Integer index: is_slice=0, so this dimension is consumed (new_ndim is
 * not incremented here). */
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
 *
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index):             # <<<<<<<<<<<<<<
 * slice_memviewslice(
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 */
goto __pyx_L6;
}
/* "View.MemoryView":754
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 * elif index is None:             # <<<<<<<<<<<<<<
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 */
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
 * False)
 * elif index is None:
 * p_dst.shape[new_ndim] = 1             # <<<<<<<<<<<<<<
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1
 */
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
 * elif index is None:
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0             # <<<<<<<<<<<<<<
 * p_dst.suboffsets[new_ndim] = -1
 * new_ndim += 1
 */
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1             # <<<<<<<<<<<<<<
 * new_ndim += 1
 * else:
 */
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1
 * new_ndim += 1             # <<<<<<<<<<<<<<
 * else:
 * start = index.start or 0
 */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 * elif index is None:             # <<<<<<<<<<<<<<
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 */
goto __pyx_L6;
}
/* "View.MemoryView":760
 * new_ndim += 1
 * else:
 * start = index.start or 0             # <<<<<<<<<<<<<<
 * stop = index.stop or 0
 * step = index.step or 0
 */
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
 * else:
 * start = index.start or 0
 * stop = index.stop or 0             # <<<<<<<<<<<<<<
 * step = index.step or 0
 *
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
 * start = index.start or 0
 * stop = index.stop or 0
 * step = index.step or 0             # <<<<<<<<<<<<<<
 *
 * have_start = index.start is not None
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
 * step = index.step or 0
 *
 * have_start = index.start is not None             # <<<<<<<<<<<<<<
 * have_stop = index.stop is not None
 * have_step = index.step is not None
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
 *
 * have_start = index.start is not None
 * have_stop = index.stop is not None             # <<<<<<<<<<<<<<
 * have_step = index.step is not None
 *
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
 * have_start = index.start is not None
 * have_stop = index.stop is not None
 * have_step = index.step is not None             # <<<<<<<<<<<<<<
 *
 * slice_memviewslice(
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
 * have_step = index.step is not None
 *
 * slice_memviewslice(             # <<<<<<<<<<<<<<
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 */
/* Slice object: is_slice=1, so the dimension is kept (new_ndim bumped
 * just below). */
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
 * have_start, have_stop, have_step,
 * True)
 * new_ndim += 1             # <<<<<<<<<<<<<<
 *
 * if isinstance(memview, _memoryviewslice):
 */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
 * cdef bint have_start, have_stop, have_step
 *
 * for dim, index in enumerate(indices):             # <<<<<<<<<<<<<<
 * if PyIndex_Check(index):
 * slice_memviewslice(
 */
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
 * new_ndim += 1
 *
 * if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
 *
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim,             # <<<<<<<<<<<<<<
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,             # <<<<<<<<<<<<<<
 * memviewsliceobj.to_dtype_func,
 * memview.dtype_is_object)
 */
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,             # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 * else:
 */
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
 *
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim,             # <<<<<<<<<<<<<<
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,
 */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
 * new_ndim += 1
 *
 * if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 */
}
/* "View.MemoryView":782
 * memview.dtype_is_object)
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL,             # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 *
 */
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL,
 * memview.dtype_is_object)             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
 * memview.dtype_is_object)
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL,             # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 *
 */
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
 *
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices):             # <<<<<<<<<<<<<<
 * cdef int new_ndim = 0, suboffset_dim = -1, dim
 * cdef bint negative_step
 */
/* function exit code */
/* Error path: release temporaries, record a traceback entry, return NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/*
 * Cython-generated implementation of View.MemoryView.slice_memviewslice
 * (original Cython source in the interleaved block comments below).
 *
 * Applies one index (is_slice == 0) or one slice (is_slice != 0) along a
 * single source dimension:
 *  - index: bounds-check `start` (negative values wrap once by `shape`);
 *    out of range raises IndexError via _err_dim.
 *  - slice: reject step == 0, clamp/default start and stop exactly like
 *    CPython slice semantics (including negative-step defaults), default
 *    step to 1, and compute the new extent with ceiling division; then
 *    write strides/shape/suboffsets for dst[new_ndim].
 * Finally advances dst.data (or the pending suboffset entry) by
 * start*stride, and propagates a non-negative suboffset.
 *
 * Returns 0 on success, -1 on error with a Python exception set.  This
 * function runs without the GIL; the error path re-acquires it only to
 * record the traceback.
 *
 * NOTE(review): machine-generated by Cython -- do not hand-edit; regenerate
 * from the .pyx source instead.  Code left byte-identical; comments only.
 */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
 * cdef bint negative_step
 *
 * if not is_slice:             # <<<<<<<<<<<<<<
 *
 * if start < 0:
 */
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
 * if not is_slice:
 *
 * if start < 0:             # <<<<<<<<<<<<<<
 * start += shape
 * if not 0 <= start < shape:
 */
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
 *
 * if start < 0:
 * start += shape             # <<<<<<<<<<<<<<
 * if not 0 <= start < shape:
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 */
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
 * if not is_slice:
 *
 * if start < 0:             # <<<<<<<<<<<<<<
 * start += shape
 * if not 0 <= start < shape:
 */
}
/* "View.MemoryView":831
 * if start < 0:
 * start += shape
 * if not 0 <= start < shape:             # <<<<<<<<<<<<<<
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 * else:
 */
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
 * start += shape
 * if not 0 <= start < shape:
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)             # <<<<<<<<<<<<<<
 * else:
 *
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
 * if start < 0:
 * start += shape
 * if not 0 <= start < shape:             # <<<<<<<<<<<<<<
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 * else:
 */
}
/* "View.MemoryView":827
 * cdef bint negative_step
 *
 * if not is_slice:             # <<<<<<<<<<<<<<
 *
 * if start < 0:
 */
goto __pyx_L3;
}
/* "View.MemoryView":835
 * else:
 *
 * negative_step = have_step != 0 and step < 0             # <<<<<<<<<<<<<<
 *
 * if have_step and step == 0:
 */
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
 * negative_step = have_step != 0 and step < 0
 *
 * if have_step and step == 0:             # <<<<<<<<<<<<<<
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 *
 */
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
 *
 * if have_step and step == 0:
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim)             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
 * negative_step = have_step != 0 and step < 0
 *
 * if have_step and step == 0:             # <<<<<<<<<<<<<<
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 *
 */
}
/* "View.MemoryView":841
 *
 *
 * if have_start:             # <<<<<<<<<<<<<<
 * if start < 0:
 * start += shape
 */
/* Clamp an explicit start into [0, shape] (or [0, shape-1] for negative
 * step), matching CPython slice normalization. */
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
 *
 * if have_start:
 * if start < 0:             # <<<<<<<<<<<<<<
 * start += shape
 * if start < 0:
 */
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
 * if have_start:
 * if start < 0:
 * start += shape             # <<<<<<<<<<<<<<
 * if start < 0:
 * start = 0
 */
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
 * if start < 0:
 * start += shape
 * if start < 0:             # <<<<<<<<<<<<<<
 * start = 0
 * elif start >= shape:
 */
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
 * start += shape
 * if start < 0:
 * start = 0             # <<<<<<<<<<<<<<
 * elif start >= shape:
 * if negative_step:
 */
__pyx_v_start = 0;
/* "View.MemoryView":844
 * if start < 0:
 * start += shape
 * if start < 0:             # <<<<<<<<<<<<<<
 * start = 0
 * elif start >= shape:
 */
}
/* "View.MemoryView":842
 *
 * if have_start:
 * if start < 0:             # <<<<<<<<<<<<<<
 * start += shape
 * if start < 0:
 */
goto __pyx_L12;
}
/* "View.MemoryView":846
 * if start < 0:
 * start = 0
 * elif start >= shape:             # <<<<<<<<<<<<<<
 * if negative_step:
 * start = shape - 1
 */
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
 * start = 0
 * elif start >= shape:
 * if negative_step:             # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
 * elif start >= shape:
 * if negative_step:
 * start = shape - 1             # <<<<<<<<<<<<<<
 * else:
 * start = shape
 */
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
 * start = 0
 * elif start >= shape:
 * if negative_step:             # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
goto __pyx_L14;
}
/* "View.MemoryView":850
 * start = shape - 1
 * else:
 * start = shape             # <<<<<<<<<<<<<<
 * else:
 * if negative_step:
 */
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
 * if start < 0:
 * start = 0
 * elif start >= shape:             # <<<<<<<<<<<<<<
 * if negative_step:
 * start = shape - 1
 */
}
__pyx_L12:;
/* "View.MemoryView":841
 *
 *
 * if have_start:             # <<<<<<<<<<<<<<
 * if start < 0:
 * start += shape
 */
goto __pyx_L11;
}
/* "View.MemoryView":852
 * start = shape
 * else:
 * if negative_step:             # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
/* Omitted start: default to the far end for negative step, 0 otherwise. */
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
 * else:
 * if negative_step:
 * start = shape - 1             # <<<<<<<<<<<<<<
 * else:
 * start = 0
 */
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
 * start = shape
 * else:
 * if negative_step:             # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
goto __pyx_L15;
}
/* "View.MemoryView":855
 * start = shape - 1
 * else:
 * start = 0             # <<<<<<<<<<<<<<
 *
 * if have_stop:
 */
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
 * start = 0
 *
 * if have_stop:             # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop += shape
 */
/* Clamp an explicit stop into [0, shape]; omitted stop defaults to -1
 * (negative step, i.e. one before the start of the axis) or shape. */
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
 *
 * if have_stop:
 * if stop < 0:             # <<<<<<<<<<<<<<
 * stop += shape
 * if stop < 0:
 */
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
 * if have_stop:
 * if stop < 0:
 * stop += shape             # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop = 0
 */
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
 * if stop < 0:
 * stop += shape
 * if stop < 0:             # <<<<<<<<<<<<<<
 * stop = 0
 * elif stop > shape:
 */
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
 * stop += shape
 * if stop < 0:
 * stop = 0             # <<<<<<<<<<<<<<
 * elif stop > shape:
 * stop = shape
 */
__pyx_v_stop = 0;
/* "View.MemoryView":860
 * if stop < 0:
 * stop += shape
 * if stop < 0:             # <<<<<<<<<<<<<<
 * stop = 0
 * elif stop > shape:
 */
}
/* "View.MemoryView":858
 *
 * if have_stop:
 * if stop < 0:             # <<<<<<<<<<<<<<
 * stop += shape
 * if stop < 0:
 */
goto __pyx_L17;
}
/* "View.MemoryView":862
 * if stop < 0:
 * stop = 0
 * elif stop > shape:             # <<<<<<<<<<<<<<
 * stop = shape
 * else:
 */
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
 * stop = 0
 * elif stop > shape:
 * stop = shape             # <<<<<<<<<<<<<<
 * else:
 * if negative_step:
 */
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
 * if stop < 0:
 * stop = 0
 * elif stop > shape:             # <<<<<<<<<<<<<<
 * stop = shape
 * else:
 */
}
__pyx_L17:;
/* "View.MemoryView":857
 * start = 0
 *
 * if have_stop:             # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop += shape
 */
goto __pyx_L16;
}
/* "View.MemoryView":865
 * stop = shape
 * else:
 * if negative_step:             # <<<<<<<<<<<<<<
 * stop = -1
 * else:
 */
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
 * else:
 * if negative_step:
 * stop = -1             # <<<<<<<<<<<<<<
 * else:
 * stop = shape
 */
__pyx_v_stop = -1L;
/* "View.MemoryView":865
 * stop = shape
 * else:
 * if negative_step:             # <<<<<<<<<<<<<<
 * stop = -1
 * else:
 */
goto __pyx_L19;
}
/* "View.MemoryView":868
 * stop = -1
 * else:
 * stop = shape             # <<<<<<<<<<<<<<
 *
 * if not have_step:
 */
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
 * stop = shape
 *
 * if not have_step:             # <<<<<<<<<<<<<<
 * step = 1
 *
 */
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
 *
 * if not have_step:
 * step = 1             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_step = 1;
/* "View.MemoryView":870
 * stop = shape
 *
 * if not have_step:             # <<<<<<<<<<<<<<
 * step = 1
 *
 */
}
/* "View.MemoryView":875
 *
 * with cython.cdivision(True):
 * new_shape = (stop - start) // step             # <<<<<<<<<<<<<<
 *
 * if (stop - start) - step * new_shape:
 */
/* C truncating division here (cython.cdivision); the remainder check just
 * below turns it into ceiling division toward the slice direction. */
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
 * new_shape = (stop - start) // step
 *
 * if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
 * new_shape += 1
 *
 */
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
 *
 * if (stop - start) - step * new_shape:
 * new_shape += 1             # <<<<<<<<<<<<<<
 *
 * if new_shape < 0:
 */
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
 * new_shape = (stop - start) // step
 *
 * if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
 * new_shape += 1
 *
 */
}
/* "View.MemoryView":880
 * new_shape += 1
 *
 * if new_shape < 0:             # <<<<<<<<<<<<<<
 * new_shape = 0
 *
 */
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
 *
 * if new_shape < 0:
 * new_shape = 0             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
 * new_shape += 1
 *
 * if new_shape < 0:             # <<<<<<<<<<<<<<
 * new_shape = 0
 *
 */
}
/* "View.MemoryView":884
 *
 *
 * dst.strides[new_ndim] = stride * step             # <<<<<<<<<<<<<<
 * dst.shape[new_ndim] = new_shape
 * dst.suboffsets[new_ndim] = suboffset
 */
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
 *
 * dst.strides[new_ndim] = stride * step
 * dst.shape[new_ndim] = new_shape             # <<<<<<<<<<<<<<
 * dst.suboffsets[new_ndim] = suboffset
 *
 */
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
 * dst.strides[new_ndim] = stride * step
 * dst.shape[new_ndim] = new_shape
 * dst.suboffsets[new_ndim] = suboffset             # <<<<<<<<<<<<<<
 *
 *
 */
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
 *
 *
 * if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
 * dst.data += start * stride
 * else:
 */
/* No pending indirect dimension yet: advance the base data pointer;
 * otherwise fold the offset into that dimension's suboffset. */
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
 *
 * if suboffset_dim[0] < 0:
 * dst.data += start * stride             # <<<<<<<<<<<<<<
 * else:
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 */
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
 *
 *
 * if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
 * dst.data += start * stride
 * else:
 */
goto __pyx_L23;
}
/* "View.MemoryView":892
 * dst.data += start * stride
 * else:
 * dst.suboffsets[suboffset_dim[0]] += start * stride             # <<<<<<<<<<<<<<
 *
 * if suboffset >= 0:
 */
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 *
 * if suboffset >= 0:             # <<<<<<<<<<<<<<
 * if not is_slice:
 * if new_ndim == 0:
 */
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
 *
 * if suboffset >= 0:
 * if not is_slice:             # <<<<<<<<<<<<<<
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset
 */
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
 * if suboffset >= 0:
 * if not is_slice:
 * if new_ndim == 0:             # <<<<<<<<<<<<<<
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 */
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
 * if not is_slice:
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset             # <<<<<<<<<<<<<<
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d "
 */
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
 * if suboffset >= 0:
 * if not is_slice:
 * if new_ndim == 0:             # <<<<<<<<<<<<<<
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 */
goto __pyx_L26;
}
/* "View.MemoryView":899
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d "             # <<<<<<<<<<<<<<
 * "must be indexed and not sliced", dim)
 * else:
 */
/*else*/ {
/* "View.MemoryView":900
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d "
 * "must be indexed and not sliced", dim)             # <<<<<<<<<<<<<<
 * else:
 * suboffset_dim[0] = new_ndim
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
 *
 * if suboffset >= 0:
 * if not is_slice:             # <<<<<<<<<<<<<<
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset
 */
goto __pyx_L25;
}
/* "View.MemoryView":902
 * "must be indexed and not sliced", dim)
 * else:
 * suboffset_dim[0] = new_ndim             # <<<<<<<<<<<<<<
 *
 * return 0
 */
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 *
 * if suboffset >= 0:             # <<<<<<<<<<<<<<
 * if not is_slice:
 * if new_ndim == 0:
 */
}
/* "View.MemoryView":904
 * suboffset_dim[0] = new_ndim
 *
 * return 0             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
 *
 * @cname('__pyx_memoryview_slice_memviewslice')
 * cdef int slice_memviewslice(             # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *dst,
 * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
 */
/* function exit code */
/* Error path: this nogil function must re-acquire the GIL to record the
 * Python traceback, then releases it again before returning -1. */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/*
 * Cython-generated (View.MemoryView:910 `pybuffer_index`).
 * Returns a pointer to the element at `index` along dimension `dim`,
 * starting from base pointer `bufp` of buffer `view`.
 * - Normalizes negative indices against view.shape[dim].
 * - Raises IndexError on out-of-bounds (returns NULL, `except NULL`).
 * - Follows one level of indirection when the dimension has a
 *   suboffset >= 0 (PEP 3118 indirect buffers).
 * NOTE(review): for ndim == 0 the "shape" is derived as len/itemsize;
 * the guarded division below protects against itemsize == 0 and the
 * PY_SSIZE_T_MIN / -1 overflow case.
 */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
/* suboffset < 0 means "direct access" (no pointer indirection). */
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
/* Guarded division: reject itemsize == 0 and the signed-overflow
 * corner case len == PY_SSIZE_T_MIN with itemsize == -1. */
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
/* Python-style negative indexing: wrap once, then bounds-check. */
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
/* Indirect dimension: the computed slot holds a pointer; follow it
 * and add the suboffset (PEP 3118 semantics). */
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/*
 * Cython-generated (View.MemoryView:943 `transpose_memslice`).
 * In-place transpose of a memoryview slice: reverses the shape and
 * strides arrays by swapping pairs from both ends.  Declared
 * `nogil except 0`, so it returns 1 on success and 0 on error
 * (after re-acquiring the GIL to record the traceback).
 * Raises ValueError if any swapped dimension is indirect
 * (suboffset >= 0), since pointer-chasing layouts cannot be
 * transposed by swapping strides alone.
 */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
/* Only ndim/2 iterations: each pass swaps dims i and (ndim-1-i). */
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
/* Short-circuit `or`: second suboffset only checked if the first
 * dimension is direct. */
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
/* Function runs without the GIL; re-acquire it just long enough to
 * record the traceback. */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
/* CPython tp_dealloc entry point for _memoryviewslice: casts the
 * generic PyObject* to the concrete struct and delegates to the
 * generated implementation below. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of _memoryviewslice.__dealloc__: releases the
 * reference this slice object holds on its source memoryview slice
 * (second argument 1 = have_gil, per the __PYX_XDEC_MEMVIEW call). */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/*
 * _memoryviewslice.convert_item_to_object override: converts the raw
 * item at `itemp` to a Python object.  Uses the slice's specialized
 * `to_object_func` when one was registered; otherwise falls back to
 * the base memoryview implementation (struct-format unpacking).
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
/* Fallback: static call into the base class implementation. */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/*
 * _memoryviewslice.assign_item_from_object override: writes the
 * Python object `value` into the raw item at `itemp`.  Uses the
 * slice's specialized `to_dtype_func` when registered (its `except 0`
 * convention means a 0 return signals an error); otherwise falls back
 * to the base memoryview implementation.  Returns Py_None on success,
 * NULL (0) with an exception set on failure.
 */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
/* Property getter entry point for `_memoryviewslice.base`: casts the
 * generic PyObject* and delegates to the implementation below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of the `base` property: returns a new reference to
 * the object this slice was originally created from (`from_object`).
 * Cannot fail, hence no error label. */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
/* Method entry point for `_memoryviewslice.__reduce_cython__`:
 * delegates to the implementation, which always raises TypeError
 * (this type is not picklable). */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of __reduce_cython__: unconditionally raises
 * TypeError — pickling is disabled because the type has a
 * non-trivial __cinit__.  Always returns NULL. */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
/* Method entry point for `_memoryviewslice.__setstate_cython__`:
 * delegates to the implementation, which always raises TypeError
 * (unpickling is disabled for this type). */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of __setstate_cython__: unconditionally raises
 * TypeError — unpickling is disabled because the type has a
 * non-trivial __cinit__.  Always returns NULL. */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/*
 * Cython-generated (View.MemoryView:999 `memoryview_fromslice`).
 * Wraps a C-level __Pyx_memviewslice in a new Python-level
 * _memoryviewslice object:
 * - returns None if the slice has no backing memoryview;
 * - copies the slice struct, takes an extra memview reference,
 *   and copies/patches the Py_buffer (buf, ndim, shape, strides,
 *   suboffsets pointers redirected into the copied slice);
 * - recomputes view.len as itemsize * product(shape[:ndim]);
 * - records the optional dtype conversion callbacks.
 * Returns a new reference, or NULL (0) with an exception set.
 */
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
/* Instantiate _memoryviewslice(None, 0, dtype_is_object) via a
 * 3-tuple argument pack, as Cython emits for Python-level calls. */
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
/* Extra acquisition balanced by __dealloc__'s __PYX_XDEC_MEMVIEW. */
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
/* Struct copy of the whole Py_buffer; individual fields are then
 * patched to describe the slice rather than the source view. */
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
/* The copied view must not share `obj` ownership with the source
 * buffer; point it at None (incref'd just below). */
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
/* Propagate writability from the source memoryview. */
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
/* Point shape/strides into the slice copy stored on `result`
 * itself, so their lifetime matches the object's. */
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* Only expose suboffsets if at least one dimension is indirect. */
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
/* view.len = itemsize * prod(shape); the multiply goes through
 * Python ints (as Cython emits for `object` loop variables). */
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/*
 * get_slice_from_memview (generated by Cython from View.MemoryView:1052).
 *
 * Obtain a pointer to a __Pyx_memviewslice describing `memview`:
 *  - if `memview` is a _memoryviewslice, return the address of its embedded
 *    `from_slice` (no copy; the pointer aliases the object's storage);
 *  - otherwise snapshot the view geometry into the caller-supplied `mslice`
 *    and return `mslice`.
 * Returns NULL with a Python exception set on failure ("except NULL").
 */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
 * __Pyx_memviewslice *mslice) except NULL:
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):
 * obj = memview # <<<<<<<<<<<<<<
 * return &obj.from_slice
 * else:
 */
/* __Pyx_TypeTest raises TypeError and jumps to the error label if the
 * downcast is invalid (None is tolerated by the generated check). */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
 * if isinstance(memview, _memoryviewslice):
 * obj = memview
 * return &obj.from_slice # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, mslice)
 */
/* NOTE(review): the returned pointer aliases obj's storage; obj's temporary
 * reference is dropped at __pyx_L0, so validity relies on the caller keeping
 * `memview` alive — standard contract for this generated helper. */
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
 * __Pyx_memviewslice *mslice) except NULL:
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
}
/* "View.MemoryView":1059
 * return &obj.from_slice
 * else:
 * slice_copy(memview, mslice) # <<<<<<<<<<<<<<
 * return mslice
 *
 */
/*else*/ {
/* Plain memoryview: copy geometry into the caller's stack slice. */
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
 * else:
 * slice_copy(memview, mslice)
 * return mslice # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_slice_copy')
 */
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice) except NULL:
 * cdef _memoryviewslice obj
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/*
 * slice_copy (generated from View.MemoryView:1063).
 *
 * Snapshot the geometry of `memview`'s underlying Py_buffer view into the
 * C-level slice struct `dst`: data pointer, and per-dimension shape,
 * strides and suboffsets.  `dst->memview` stores a borrowed pointer to
 * `memview`; no reference is added here.
 */
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
__Pyx_RefNannyDeclarations
int dim;
Py_ssize_t *shape;
Py_ssize_t *strides;
Py_ssize_t *suboffsets;
__Pyx_RefNannySetupContext("slice_copy", 0);
shape = __pyx_v_memview->view.shape;
strides = __pyx_v_memview->view.strides;
suboffsets = __pyx_v_memview->view.suboffsets; /* may be NULL */
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
for (dim = 0; dim < __pyx_v_memview->view.ndim; dim++) {
__pyx_v_dst->shape[dim] = shape[dim];
__pyx_v_dst->strides[dim] = strides[dim];
/* -1 marks "no indirection" when the exporter supplies no suboffsets. */
__pyx_v_dst->suboffsets[dim] = suboffsets ? suboffsets[dim] : -1;
}
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/*
 * memoryview_copy (generated from View.MemoryView:1080).
 *
 * Create a new memoryview object viewing the same buffer as `memview`:
 * the geometry is snapshotted into a stack slice via slice_copy and then
 * handed to memoryview_copy_from_slice.
 * Returns a new reference, or 0 (NULL) with an exception set on failure.
 */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
 * return memoryview_copy_from_slice(memview, &memviewslice)
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice)
 * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/*
 * memoryview_copy_from_slice (generated from View.MemoryView:1087).
 *
 * Create a new memoryview object from `memview` plus an explicit slice.
 * If `memview` is a _memoryviewslice, its to_object_func/to_dtype_func
 * converters are propagated to the new object; otherwise both are NULL.
 * Returns a new reference, or 0 (NULL) with an exception set on failure.
 */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
 *
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 */
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
 * else:
 * to_object_func = NULL
 */
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
goto __pyx_L3;
}
/* "View.MemoryView":1098
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 * to_object_func = NULL # <<<<<<<<<<<<<<
 * to_dtype_func = NULL
 *
 */
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
 * else:
 * to_object_func = NULL
 * to_dtype_func = NULL # <<<<<<<<<<<<<<
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 */
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
 * to_dtype_func = NULL
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object)
 */
__Pyx_XDECREF(__pyx_r);
/* The slice struct is passed by value (memviewslice[0]); the new object
 * takes over by snapshotting it in memoryview_fromslice. */
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/*
 * abs_py_ssize_t (generated from View.MemoryView:1109, nogil).
 *
 * Absolute value of a Py_ssize_t.  Same semantics as the original
 * if/else form; note PY_SSIZE_T_MIN negates to itself (as in the
 * generated code — no overflow handling here either).
 */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
return (__pyx_v_arg < 0) ? (-__pyx_v_arg) : __pyx_v_arg;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/*
 * get_best_order (generated from View.MemoryView:1116, nogil).
 *
 * Figure out the best memory access order for a slice: compare the
 * stride of the innermost non-degenerate dimension (C order candidate)
 * against that of the outermost one (Fortran order candidate) and
 * return 'C' or 'F' accordingly.  Dimensions with extent <= 1 are
 * ignored since their stride is meaningless for ordering.
 */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
Py_ssize_t c_stride = 0;
Py_ssize_t f_stride = 0;
int dim;
/* Scan backwards: last dimension with extent > 1 gives the C stride. */
for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
if (__pyx_v_mslice->shape[dim] > 1) {
c_stride = __pyx_v_mslice->strides[dim];
break;
}
}
/* Scan forwards: first dimension with extent > 1 gives the F stride. */
for (dim = 0; dim < __pyx_v_ndim; dim++) {
if (__pyx_v_mslice->shape[dim] > 1) {
f_stride = __pyx_v_mslice->strides[dim];
break;
}
}
/* Ties (including the all-degenerate case, 0 <= 0) prefer C order. */
return (abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride)) ? 'C' : 'F';
}
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/*
 * _copy_strided_to_strided (generated from View.MemoryView:1140).
 *
 * Recursively copy an ndim-dimensional strided buffer element by element.
 * The iteration count for every dimension comes from dst_shape; the caller
 * guarantees src and dst have compatible extents.  In the 1-D base case a
 * single memcpy is used when both sides are exactly itemsize-contiguous
 * with positive strides; otherwise elements are copied one at a time.
 */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
Py_ssize_t extent = __pyx_v_dst_shape[0];
Py_ssize_t sstride = __pyx_v_src_strides[0];
Py_ssize_t dstride = __pyx_v_dst_strides[0];
Py_ssize_t k;
if (__pyx_v_ndim == 1) {
if ((sstride > 0) && (dstride > 0) &&
(((size_t)sstride) == __pyx_v_itemsize) && (((size_t)dstride) == __pyx_v_itemsize)) {
/* Both sides densely packed: one bulk copy of the whole row. */
memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * extent));
} else {
/* Strided row: copy each element and step both cursors. */
for (k = 0; k < extent; k++) {
memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
__pyx_v_src_data += sstride;
__pyx_v_dst_data += dstride;
}
}
} else {
/* Peel the outermost dimension and recurse on the remaining ndim-1. */
for (k = 0; k < extent; k++) {
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1),
__pyx_v_dst_data, (__pyx_v_dst_strides + 1),
(__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1),
(__pyx_v_ndim - 1), __pyx_v_itemsize);
__pyx_v_src_data += sstride;
__pyx_v_dst_data += dstride;
}
}
}
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/*
 * copy_strided_to_strided (generated from View.MemoryView:1170, nogil).
 *
 * Copy the contents of slice `src` into slice `dst`, assuming compatible
 * shapes.  All per-dimension work is delegated to the recursive helper
 * _copy_strided_to_strided.
 */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides,
__pyx_v_dst->data, __pyx_v_dst->strides,
__pyx_v_src->shape, __pyx_v_dst->shape,
__pyx_v_ndim, __pyx_v_itemsize);
}
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/*
 * slice_get_size (generated from View.MemoryView:1177, nogil).
 *
 * Return the size in bytes of the memory occupied by the slice:
 * itemsize * product(shape[0..ndim-1]).  As in the generated code,
 * there is no overflow check on the product.
 */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t total = __pyx_v_src->memview->view.itemsize;
int dim;
for (dim = 0; dim < __pyx_v_ndim; dim++) {
total *= __pyx_v_src->shape[dim];
}
return total;
}
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/*
 * fill_contig_strides_array (generated from View.MemoryView:1187, nogil).
 *
 * Fill `strides` with contiguous strides for `shape`, starting from
 * `stride` (typically the itemsize).  Order 'F' fills first-dimension-
 * fastest (Fortran); any other order fills last-dimension-fastest (C).
 * Returns the accumulated stride after the last dimension, i.e. the
 * total extent in bytes.
 */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int dim;
if (__pyx_v_order == 'F') {
/* Fortran order: innermost stride at index 0, growing outward. */
for (dim = 0; dim < __pyx_v_ndim; dim++) {
__pyx_v_strides[dim] = __pyx_v_stride;
__pyx_v_stride *= __pyx_v_shape[dim];
}
} else {
/* C order: innermost stride at the last index, growing backward. */
for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
__pyx_v_strides[dim] = __pyx_v_stride;
__pyx_v_stride *= __pyx_v_shape[dim];
}
}
return __pyx_v_stride;
}
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* Cython impl. of View.MemoryView:1208 `copy_data_to_temp`.
 *
 * Copies the data of `src` into a freshly malloc'ed contiguous buffer and
 * fills in `tmpslice` to describe that buffer (same shape as src, strides
 * rebuilt contiguously in `order` ('C' or 'F'), all suboffsets -1 i.e.
 * direct).  Returns the buffer pointer, or NULL after raising MemoryError
 * (the GIL is acquired only on the error path, for the traceback).
 * Caller owns the returned buffer and must free() it.
 */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* View.MemoryView:1219-1220 — total byte size of the slice */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
  /* View.MemoryView:1222-1224 — allocate; raise MemoryError on failure */
  __pyx_v_result = malloc(__pyx_v_size);
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
  }
  /* View.MemoryView:1227-1231 — describe the buffer in tmpslice: same shape
   * as src, every dimension direct (suboffset -1) */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
  __pyx_v_tmpslice->memview = __pyx_v_src->memview; /* borrowed; no incref here */
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
  }
  /* View.MemoryView:1233 — rebuild strides contiguously in `order` */
  (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
  /* View.MemoryView:1237-1239 — zero the stride of extent-1 dimensions */
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
    }
  }
  /* View.MemoryView:1241-1244 — one memcpy when src is already contiguous
   * in `order`, element-wise strided copy otherwise */
  __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {
    (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
    goto __pyx_L9;
  }
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;
  /* View.MemoryView:1246 — success: return the buffer */
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* error exit: grab the GIL to record the traceback, return NULL */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* Cython impl. of View.MemoryView:1251 `_err_extents` (except -1, with gil).
 *
 * Acquires the GIL, builds the tuple (i, extent1, extent2), formats it into
 * the "got differing extents in dimension %d (got %d and %d)" message and
 * raises ValueError with it.  This function never returns success: every
 * path falls through to __pyx_L1_error and returns -1 with an exception set.
 */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "with gil": acquire before touching any Python object */
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_extents", 0);
  /* View.MemoryView:1254 — box the three integers */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  /* pack into a 3-tuple; SET_ITEM steals the references given away above */
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;
  /* View.MemoryView:1253 — "%"-format the message, raise ValueError(msg) */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __PYX_ERR(1, 1253, __pyx_L1_error)
  /* sole exit: clean up temporaries, record traceback, release the GIL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* Cython impl. of View.MemoryView:1257 `_err_dim` (except -1, with gil).
 *
 * Acquires the GIL, ASCII-decodes the C string `msg`, "%"-formats it with
 * `dim`, and raises `error` (an exception class object) with the resulting
 * message.  Never returns success: every path ends at __pyx_L1_error and
 * returns -1 with an exception set.
 */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "with gil": acquire before touching any Python object */
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* View.MemoryView:1258 — msg.decode('ascii') % dim */
  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* call error(formatted_msg); unwrap bound methods when applicable */
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* raise the constructed exception instance */
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 1258, __pyx_L1_error)
  /* sole exit: clean up temporaries, record traceback, release the GIL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* Cython impl. of View.MemoryView:1261 `_err` (except -1, with gil).
 *
 * Acquires the GIL and raises `error` — with the ASCII-decoded `msg` as its
 * argument when msg is non-NULL, or bare (`raise error`) when msg is NULL.
 * Never returns success: both branches raise, fall to __pyx_L1_error and
 * return -1 with an exception set.
 */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "with gil": acquire before touching any Python object */
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* View.MemoryView:1262-1263 — msg given: raise error(msg.decode('ascii')) */
  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
  if (unlikely(__pyx_t_1)) {
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    /* call error(decoded_msg); unwrap bound methods when applicable */
    __Pyx_INCREF(__pyx_v_error);
    __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_4, function);
      }
    }
    __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(1, 1263, __pyx_L1_error)
  }
  /* View.MemoryView:1265 — no msg: re-raise the error object as-is */
  /*else*/ {
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
    __PYX_ERR(1, 1265, __pyx_L1_error)
  }
  /* sole exit: clean up temporaries, record traceback, release the GIL */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* Cython impl. of View.MemoryView:1268 `memoryview_copy_contents`
 * (except -1).
 *
 * Copies the contents of slice `src` into slice `dst`, broadcasting leading
 * dimensions and checking extents.  Handles overlapping memory by first
 * copying src into a malloc'ed temporary (`tmpdata`).  Returns 0 on success,
 * -1 with a Python exception set on failure (GIL acquired only on the error
 * path).
 *
 * FIX vs. generated original: the error exit now frees `tmpdata`.  In the
 * original, a failure in transpose_memslice() after copy_data_to_temp()
 * succeeded leaked the temporary buffer.  tmpdata starts NULL and free(NULL)
 * is a no-op, so this is safe on every error path.
 */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
  void *__pyx_v_tmpdata;
  size_t __pyx_v_itemsize;
  int __pyx_v_i;
  char __pyx_v_order;
  int __pyx_v_broadcasting;
  int __pyx_v_direct_copy;
  __Pyx_memviewslice __pyx_v_tmp;
  int __pyx_v_ndim;
  int __pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  void *__pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* View.MemoryView:1276-1281 — locals: no temp buffer yet, pick the best
   * iteration order for src, no broadcasting / direct copy detected yet */
  __pyx_v_tmpdata = NULL;
  __pyx_t_1 = __pyx_v_src.memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
  __pyx_v_broadcasting = 0;
  __pyx_v_direct_copy = 0;
  /* View.MemoryView:1284-1287 — pad the lower-dimensional slice with
   * leading broadcast dimensions so both have the same ndim */
  __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
  if (__pyx_t_2) {
    __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
    goto __pyx_L3;
  }
  __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
  if (__pyx_t_2) {
    __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
  }
  __pyx_L3:;
  /* View.MemoryView:1289 — ndim = max(src_ndim, dst_ndim) */
  __pyx_t_3 = __pyx_v_dst_ndim;
  __pyx_t_4 = __pyx_v_src_ndim;
  if (((__pyx_t_3 > __pyx_t_4) != 0)) {
    __pyx_t_5 = __pyx_t_3;
  } else {
    __pyx_t_5 = __pyx_t_4;
  }
  __pyx_v_ndim = __pyx_t_5;
  /* View.MemoryView:1291-1300 — per dimension: extent-1 src dims broadcast
   * (stride 0); other mismatches raise ValueError via _err_extents; any
   * indirect (suboffset >= 0) src dim raises via _err_dim */
  __pyx_t_5 = __pyx_v_ndim;
  __pyx_t_3 = __pyx_t_5;
  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
    __pyx_v_i = __pyx_t_4;
    __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
    if (__pyx_t_2) {
      __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
      if (__pyx_t_2) {
        __pyx_v_broadcasting = 1;
        (__pyx_v_src.strides[__pyx_v_i]) = 0;
        goto __pyx_L7;
      }
      /*else*/ {
        __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
      }
      __pyx_L7:;
    }
    __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
    if (__pyx_t_2) {
      __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
    }
  }
  /* View.MemoryView:1302-1308 — src and dst overlap: copy src to a fresh
   * temporary buffer first, then copy from that.  tmpdata owns the buffer. */
  __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
  if (__pyx_t_2) {
    __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
    if (__pyx_t_2) {
      __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
    }
    __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
    __pyx_v_tmpdata = __pyx_t_7;
    __pyx_v_src = __pyx_v_tmp;
  }
  /* View.MemoryView:1310-1324 — fast path: when not broadcasting and both
   * slices are contiguous in the same order, a single memcpy suffices */
  __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
    if (__pyx_t_2) {
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
      goto __pyx_L12;
    }
    __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
    if (__pyx_t_2) {
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
    }
    __pyx_L12:;
    __pyx_t_2 = (__pyx_v_direct_copy != 0);
    if (__pyx_t_2) {
      /* suspend object refcounting in dst, raw-copy, then restore it */
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
      (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
      free(__pyx_v_tmpdata);
      __pyx_r = 0;
      goto __pyx_L0;
    }
  }
  /* View.MemoryView:1326-1330 — both best traversed in Fortran order:
   * transpose both slices and fall through to the C-order strided copy */
  __pyx_t_2 = (__pyx_v_order == 'F');
  if (__pyx_t_2) {
    __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
  }
  __pyx_t_8 = (__pyx_t_2 != 0);
  if (__pyx_t_8) {
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
  }
  /* View.MemoryView:1332-1337 — general element-wise strided copy */
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
  copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
  free(__pyx_v_tmpdata);
  __pyx_r = 0;
  goto __pyx_L0;
  /* error exit: grab the GIL to record the traceback, release the temp
   * buffer (bug fix — the original leaked it), return -1 */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  free(__pyx_v_tmpdata);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* Cython impl. of View.MemoryView:1340 `broadcast_leading` (nogil).
 *
 * Grows a slice from `ndim` to `ndim_other` dimensions in place: the
 * existing dimensions are shifted to the trailing positions and each new
 * leading dimension becomes a broadcast dimension (extent 1, stride equal
 * to strides[0], suboffset -1 i.e. direct).
 */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int __pyx_v_i;
  int __pyx_v_offset;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  /* number of new leading dimensions to insert */
  __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
  /* shift existing dims up by `offset`, walking backwards so that the
   * source entries are read before they can be overwritten */
  __pyx_t_1 = __pyx_v_ndim;
  while (__pyx_t_1-- > 0) {
    __pyx_v_i = __pyx_t_1;
    __pyx_v_mslice->shape[__pyx_v_i + __pyx_v_offset] = __pyx_v_mslice->shape[__pyx_v_i];
    __pyx_v_mslice->strides[__pyx_v_i + __pyx_v_offset] = __pyx_v_mslice->strides[__pyx_v_i];
    __pyx_v_mslice->suboffsets[__pyx_v_i + __pyx_v_offset] = __pyx_v_mslice->suboffsets[__pyx_v_i];
  }
  /* initialize the new leading dimensions as direct broadcast dims */
  __pyx_t_2 = __pyx_v_offset;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
    __pyx_v_i = __pyx_t_3;
    __pyx_v_mslice->shape[__pyx_v_i] = 1;
    __pyx_v_mslice->strides[__pyx_v_i] = __pyx_v_mslice->strides[0];
    __pyx_v_mslice->suboffsets[__pyx_v_i] = -1L;
  }
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* "View.MemoryView":1362
 * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object,
 *                            int ndim, bint inc) nogil
 *
 * If the slice's element type is a Python object, adjust the refcount of
 * every object referenced by `dst` (INCREF when `inc` is true, DECREF
 * otherwise) via the with-gil helper. No-op for non-object dtypes.
 */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
    if (__pyx_v_dtype_is_object) {
        /* Object refcounting requires the GIL; the helper acquires it. */
        __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data,
                                                            __pyx_v_dst->shape,
                                                            __pyx_v_dst->strides,
                                                            __pyx_v_ndim,
                                                            __pyx_v_inc);
    }
}
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* Acquire the GIL (when threading is compiled in), then delegate to
 * refcount_objects_in_slice to INCREF/DECREF every PyObject* reachable
 * through the described slice. Called from nogil code paths
 * (refcount_copying), which is why the GIL must be taken here. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
/* Save the GIL state so it can be restored symmetrically on exit. */
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
 * Py_ssize_t *strides, int ndim,
 * bint inc) with gil:
 * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_refcount_objects_in_slice')
 */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
 *
 * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
 * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim,
 * bint inc) with gil:
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* "View.MemoryView":1377
 * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape,
 *                                     Py_ssize_t *strides, int ndim, bint inc)
 *
 * Recursively walk an ndim-dimensional slice. At the innermost dimension
 * (ndim == 1) each element is a PyObject* that gets INCREF'd or DECREF'd
 * according to `inc`; outer dimensions recurse with shape/strides advanced
 * by one. `data` steps by strides[0] between iterations. Caller must hold
 * the GIL (see the _with_gil wrapper).
 */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
    __Pyx_RefNannyDeclarations
    Py_ssize_t extent = __pyx_v_shape[0];
    Py_ssize_t pos;
    __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
    for (pos = 0; pos < extent; pos++) {
        if (__pyx_v_ndim == 1) {
            /* Innermost dimension: data points directly at a PyObject*. */
            if (__pyx_v_inc) {
                Py_INCREF(((PyObject **)__pyx_v_data)[0]);
            } else {
                Py_DECREF(((PyObject **)__pyx_v_data)[0]);
            }
        } else {
            /* Recurse into the next dimension. */
            __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data,
                                                       __pyx_v_shape + 1,
                                                       __pyx_v_strides + 1,
                                                       __pyx_v_ndim - 1,
                                                       __pyx_v_inc);
        }
        __pyx_v_data += __pyx_v_strides[0];
    }
    __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* "View.MemoryView":1397
 * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim,
 *                               size_t itemsize, void *item,
 *                               bint dtype_is_object) nogil
 *
 * Broadcast-assign one scalar `item` (itemsize bytes) to every element of
 * `dst`. For object dtypes the old elements are DECREF'd first and the
 * newly written objects INCREF'd afterwards, bracketing the raw memcpy
 * fill performed by _slice_assign_scalar.
 */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
    /* Release references held by the about-to-be-overwritten elements. */
    __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
    /* Scatter the scalar into every element. */
    __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape,
                                          __pyx_v_dst->strides, __pyx_v_ndim,
                                          __pyx_v_itemsize, __pyx_v_item);
    /* Acquire references for the freshly written object pointers. */
    __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
}
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* "View.MemoryView":1407
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,
 *                                Py_ssize_t *strides, int ndim,
 *                                size_t itemsize, void *item) nogil
 *
 * Recursive worker: copy `itemsize` bytes from `item` into every element
 * of the strided ndim-dimensional region starting at `data`. The base
 * case (ndim == 1) memcpy's each element; outer dimensions recurse with
 * shape/strides advanced by one, stepping `data` by strides[0].
 */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
    Py_ssize_t step = __pyx_v_strides[0];
    Py_ssize_t extent = __pyx_v_shape[0];
    Py_ssize_t pos;
    if (__pyx_v_ndim == 1) {
        /* Innermost dimension: raw byte copy per element. */
        for (pos = 0; pos < extent; pos++) {
            (void)memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);
            __pyx_v_data += step;
        }
    } else {
        /* Recurse into the next dimension for each index of this one. */
        for (pos = 0; pos < extent; pos++) {
            __pyx_memoryview__slice_assign_scalar(__pyx_v_data,
                                                  __pyx_v_shape + 1,
                                                  __pyx_v_strides + 1,
                                                  __pyx_v_ndim - 1,
                                                  __pyx_v_itemsize,
                                                  __pyx_v_item);
            __pyx_v_data += step;
        }
    }
}
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
/* Python-callable wrapper for __pyx_unpickle_Enum(__pyx_type, __pyx_checksum,
 * __pyx_state). Unpacks exactly three arguments from *args/**kwargs (mixing
 * positional and keyword forms), converts the checksum to a C long, and
 * dispatches to the implementation __pyx_pf_... function. Raises TypeError
 * via __Pyx_RaiseArgtupleInvalid on arity errors. */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* First gather however many positional arguments were supplied
 * (falling through collects all of them), then fill the remaining
 * slots from keywords. */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Resume at the first slot NOT covered positionally; each required
 * argument missing from kwargs is an arity error. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
/* Any keyword left over is either a duplicate or unknown name. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly three positional arguments, no kwargs. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of __pyx_unpickle_Enum: verify the pickled checksum
 * against the compiled-in value (0xb068931, raising pickle.PickleError on
 * mismatch), create a new Enum instance via Enum.__new__(__pyx_type), and
 * restore its state through __pyx_unpickle_Enum__set_state when a state
 * tuple is provided. Returns the reconstructed object, or NULL on error. */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 */
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
 * cdef object __pyx_result
 * if __pyx_checksum != 0xb068931:
 * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type)
 */
/* Build the fromlist ["PickleError"], import pickle, then pull the
 * PickleError attribute out of the imported module. */
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
 * if __pyx_checksum != 0xb068931:
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None:
 */
/* Format the error message with the offending checksum and raise
 * PickleError; this path always jumps to __pyx_L1_error. */
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 */
}
/* "(tree fragment)":7
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
 * if __pyx_state is not None:
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 */
/* Look up Enum.__new__ and call it with the requested (sub)type. */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None: # <<<<<<<<<<<<<<
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result
 */
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None:
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
 * return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 */
/* State must be a tuple (or None, already excluded above). */
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None: # <<<<<<<<<<<<<<
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result
 */
}
/* "(tree fragment)":10
 * if __pyx_state is not None:
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0]
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* Restore an Enum instance from its pickled state tuple:
 *   result.name = state[0]
 *   if len(state) > 1 and hasattr(result, '__dict__'):
 *       result.__dict__.update(state[1])
 * Returns Py_None on success, NULL (with an exception set) on failure. */
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
 * return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 * __pyx_result.__dict__.update(__pyx_state[1])
 */
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
/* Swap state[0] into result->name, dropping the old reference. */
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
 * __pyx_result.__dict__.update(__pyx_state[1])
 */
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
/* Short-circuit `and`: only probe hasattr when len(state) > 1. */
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
 */
/* result.__dict__.update(state[1]) — bound-method call expanded. */
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
 * __pyx_result.__dict__.update(__pyx_state[1])
 */
}
/* "(tree fragment)":11
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static struct __pyx_vtabstruct_array __pyx_vtable_array;
/* tp_new for the Cython `array` type: allocate the instance (delegating
 * to object.__new__ for abstract subclasses), install the vtable, set the
 * object-typed fields to None, and run __cinit__. On __cinit__ failure
 * the half-built instance is released and NULL returned. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
    PyObject *obj;
    struct __pyx_array_obj *self;
    if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
        obj = (*t->tp_alloc)(t, 0);
    } else {
        obj = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
    }
    if (unlikely(!obj)) return 0;
    self = (struct __pyx_array_obj *)obj;
    self->__pyx_vtab = __pyx_vtabptr_array;
    Py_INCREF(Py_None); self->mode = ((PyObject*)Py_None);
    Py_INCREF(Py_None); self->_format = ((PyObject*)Py_None);
    if (unlikely(__pyx_array___cinit__(obj, a, k) < 0)) {
        Py_DECREF(obj);
        return NULL;
    }
    return obj;
}
/* tp_dealloc for the `array` type: optionally run tp_finalize (PEP 442),
 * call the Cython __dealloc__ with the refcount temporarily resurrected,
 * clear the object fields, and release the memory. */
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
/* Run tp_finalize first if the type defines one; it may resurrect the
 * object, in which case deallocation is aborted. */
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
/* __dealloc__ may execute arbitrary Python code: save any pending
 * exception and bump the refcount so the object looks alive while
 * that code runs, then undo both. */
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
/* sq_item for the `array` type: route integer indexing through the
 * mapping protocol (mp_subscript) by boxing the index first. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
    PyObject *index = PyInt_FromSsize_t(i);
    PyObject *result;
    if (!index) return 0;
    result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
    Py_DECREF(index);
    return result;
}
/* mp_ass_subscript for the `array` type: a NULL value means `del o[i]`,
 * which this type does not support; otherwise delegate to __setitem__. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
    if (!v) {
        PyErr_Format(PyExc_NotImplementedError,
            "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
        return -1;
    }
    return __pyx_array___setitem__(o, i, v);
}
/* tp_getattro for the `array` type: try normal attribute lookup first;
 * on AttributeError (and only AttributeError), clear it and fall back to
 * the class's __getattr__ hook. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
    PyObject *attr = __Pyx_PyObject_GenericGetAttr(o, n);
    if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Clear();
        attr = __pyx_array___getattr__(o, n);
    }
    return attr;
}
/* Getter thunk for the `memview` property of the `array` type. */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
/* Method table for the `array` type. __getattr__ carries METH_COEXIST so
 * the explicit entry coexists with the tp_getattro slot wrapper. */
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
/* Property table for the `array` type: read-only `memview` attribute. */
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
/* Sequence protocol for `array`: only len() and item access are filled in;
 * sq_item forwards to the mapping subscript (see __pyx_sq_item_array). */
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
/* Mapping protocol for `array`: len(), o[key], and o[key] = v / del o[key]. */
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
/* Buffer protocol for `array`: new-style getbuffer only; the four legacy
 * Python 2 slots are present (zeroed) when targeting Python < 3. */
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
/* Static type object for the Cython `array` helper class. Slot layout
 * varies with the targeted CPython version, hence the #if guards; the
 * positional initializer order must match PyTypeObject exactly. */
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"skbio.stats.ordination._cutils.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
/* tp_new for the memoryview `Enum` sentinel type: allocate the instance
 * (delegating to object.__new__ for abstract subclasses) and initialize
 * its single `name` field to None. */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
    PyObject *obj;
    struct __pyx_MemviewEnum_obj *self;
    if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
        obj = (*t->tp_alloc)(t, 0);
    } else {
        obj = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
    }
    if (unlikely(!obj)) return 0;
    self = (struct __pyx_MemviewEnum_obj *)obj;
    Py_INCREF(Py_None);
    self->name = Py_None;
    return obj;
}
/* tp_dealloc for the `Enum` type: optionally run tp_finalize (which may
 * resurrect the object), untrack from the GC, drop the `name` reference,
 * and free the instance. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
/* Must untrack before tearing down fields so the collector never sees
 * a partially-cleared object. */
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse for the Enum helper: report the single owned reference
 * (`name`) to the garbage collector's visitor. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
  struct __pyx_MemviewEnum_obj *self = (struct __pyx_MemviewEnum_obj *)o;
  if (self->name) {
    int err = (*v)(self->name, a);
    if (err)
      return err;
  }
  return 0;
}
/* tp_clear for the Enum helper: break potential reference cycles by
 * replacing `name` with Py_None before releasing the old value, so the
 * slot is never left dangling while arbitrary destructors run. */
static int __pyx_tp_clear_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *self = (struct __pyx_MemviewEnum_obj *)o;
  PyObject *old_name = (PyObject *)self->name;
  self->name = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old_name);
  return 0;
}
/* Method table for the Enum helper type: only the Cython pickling
 * protocol hooks. */
static PyMethodDef __pyx_methods_Enum[] = {
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* Type object for the internal memoryview Enum helper (GC-enabled;
 * repr/init/new slots filled, everything else defaulted). */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
  PyVarObject_HEAD_INIT(0, 0)
  "skbio.stats.ordination._cutils.Enum", /*tp_name*/
  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_MemviewEnum___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_Enum, /*tp_traverse*/
  __pyx_tp_clear_Enum, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_Enum, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_MemviewEnum___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_Enum, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1
  0, /*tp_vectorcall*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
  0, /*tp_print*/
  #endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
/* tp_new for the memoryview type: allocate, install the vtable, seed
 * all owned slots with Py_None, then run the Cython __cinit__.  On
 * __cinit__ failure the half-built object is released and NULL is
 * returned. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryview_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryview_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_memoryview;
  p->obj = Py_None; Py_INCREF(Py_None);
  p->_size = Py_None; Py_INCREF(Py_None);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  /* view.obj is the Py_buffer owner; NULL until a buffer is acquired. */
  p->view.obj = NULL;
  if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
  return o;
  bad:
  Py_DECREF(o); o = 0;
  return NULL;
}
/* tp_dealloc for memoryview.  After the optional PEP 442 finalizer and
 * GC untracking, the refcount is temporarily bumped so the Python-level
 * __dealloc__ can run safely on an apparently-live object, with any
 * pending exception saved and restored around the call.  Finally all
 * owned references are dropped and the memory is freed. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_memoryview___dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->obj);
  Py_CLEAR(p->_size);
  Py_CLEAR(p->_array_interface);
  (*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse for memoryview: report every owned reference (the three
 * object slots plus the Py_buffer owner) to the GC visitor, in the
 * same order as before. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
  struct __pyx_memoryview_obj *self = (struct __pyx_memoryview_obj *)o;
  PyObject *owned[] = { self->obj, self->_size, self->_array_interface, self->view.obj };
  size_t i;
  for (i = 0; i < sizeof(owned) / sizeof(owned[0]); i++) {
    if (owned[i]) {
      int err = (*v)(owned[i], a);
      if (err)
        return err;
    }
  }
  return 0;
}
/* tp_clear for memoryview: break cycles by swapping each owned slot to
 * Py_None before dropping the previous value, then releasing the
 * Py_buffer owner. */
static int __pyx_tp_clear_memoryview(PyObject *o) {
  struct __pyx_memoryview_obj *self = (struct __pyx_memoryview_obj *)o;
  PyObject *old;

  old = (PyObject *)self->obj;
  self->obj = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old);

  old = (PyObject *)self->_size;
  self->_size = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old);

  old = (PyObject *)self->_array_interface;
  self->_array_interface = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old);

  Py_CLEAR(self->view.obj);
  return 0;
}
/* sq_item: implement o[i] by boxing the integer index and delegating
 * to the type's mapping subscript slot. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
  PyObject *result;
  PyObject *index = PyInt_FromSsize_t(i);
  if (!index)
    return 0;
  result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
  Py_DECREF(index);
  return result;
}
/* mp_ass_subscript: forward item assignment to the Cython __setitem__;
 * a NULL value means "del o[i]", which memoryviews do not support. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
  return __pyx_memoryview___setitem__(o, i, v);
}
/* Property getters for the memoryview type.  Each one simply forwards
 * to the corresponding Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
/* Method table for the memoryview type: contiguity queries, copies,
 * and the Cython pickling hooks. */
static PyMethodDef __pyx_methods_memoryview[] = {
  {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
  {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
  {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
  {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* Read-only property table for memoryview (no setters, no docstrings). */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol for memoryview: only len() and indexed access. */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol for memoryview: len(), o[key], and o[key] = value. */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
/* Buffer protocol for memoryview: the old Python 2 buffer slots are
 * unused; only the new-style getbuffer is implemented. */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
/* Type object for Cython's internal memoryview class: GC-enabled,
 * exposes sequence/mapping/buffer protocols and the tables above. */
static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  "skbio.stats.ordination._cutils.memoryview", /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1
  0, /*tp_vectorcall*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
  0, /*tp_print*/
  #endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
/* tp_new for _memoryviewslice: let the memoryview base allocate and
 * initialise itself, then override the vtable and seed the
 * subclass-specific slots. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryviewslice_obj *p;
  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryviewslice_obj *)o);
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
  p->from_object = Py_None; Py_INCREF(Py_None);
  p->from_slice.memview = NULL;
  return o;
}
/* tp_dealloc for _memoryviewslice.  Runs the optional finalizer, then
 * the slice's own __dealloc__ behind a temporary refcount bump (with
 * exception state preserved), drops `from_object`, and finally
 * re-tracks the object before chaining to the base dealloc — which
 * untracks again and frees. */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_memoryviewslice___dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->from_object);
  /* Base dealloc expects a tracked object and untracks it itself. */
  PyObject_GC_Track(o);
  __pyx_tp_dealloc_memoryview(o);
}
/* tp_traverse for _memoryviewslice: visit the base class's references
 * first, then the subclass's `from_object` slot. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
  struct __pyx_memoryviewslice_obj *self = (struct __pyx_memoryviewslice_obj *)o;
  int err = __pyx_tp_traverse_memoryview(o, v, a);
  if (err)
    return err;
  if (self->from_object) {
    err = (*v)(self->from_object, a);
    if (err)
      return err;
  }
  return 0;
}
/* tp_clear for _memoryviewslice: clear the base class first, then swap
 * `from_object` to Py_None before releasing it, and drop the slice's
 * memview reference via the Cython memoryview-slice macro. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}
/* Property getter for _memoryviewslice.base: forwards to the
 * Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
/* Method table for _memoryviewslice: only the Cython pickling hooks. */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* Property table for _memoryviewslice: a single read-only `base`. */
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* Type object for _memoryviewslice, the internal subclass used to hand
 * memoryview slices back to Python.  On PyPy repr/str are wired
 * explicitly; on CPython they are inherited from the base type. */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "skbio.stats.ordination._cutils._memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  "Internal class for passing memoryview slices to Python", /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets__memoryviewslice, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1
  0, /*tp_vectorcall*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
  0, /*tp_print*/
  #endif
};
/* Module-level method table: empty — all callables are added during
 * module exec. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
/* Python 3 module definition.  With PEP 489 multi-phase init enabled,
 * creation and exec are split into slots; otherwise a classic
 * single-phase PyModuleDef with per-interpreter state disabled
 * (m_size == -1). */
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec__cutils(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
  {Py_mod_create, (void*)__pyx_pymod_create},
  {Py_mod_exec, (void*)__pyx_pymod_exec__cutils},
  {0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
    PyModuleDef_HEAD_INIT,
    "_cutils",
    0, /* m_doc */
  #if CYTHON_PEP489_MULTI_PHASE_INIT
    0, /* m_size */
  #else
    -1, /* m_size */
  #endif
    __pyx_methods /* m_methods */,
  #if CYTHON_PEP489_MULTI_PHASE_INIT
    __pyx_moduledef_slots, /* m_slots */
  #else
    NULL, /* m_reload */
  #endif
    NULL, /* m_traverse */
    NULL, /* m_clear */
    NULL /* m_free */
};
#endif
/* Mark one-shot initialisation helpers as cold on GCC >= 4.3 so the
 * optimizer moves them out of hot code; expands to nothing elsewhere. */
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
    #define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
    #define CYTHON_SMALL_CODE __attribute__((cold))
#else
    #define CYTHON_SMALL_CODE
#endif
#endif
/* Interned-string table: every literal/identifier the module uses,
 * created once at init time.  Each entry holds the destination pointer,
 * the C string, its size, and flags (presumably encoding/is_unicode/
 * is_str/intern — defined by __Pyx_StringTabEntry elsewhere; confirm
 * against the Cython runtime). */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0},
  {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
  {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
  {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
  {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
  {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
  {&__pyx_kp_s_Expected_at_least_d_argument_s_g, __pyx_k_Expected_at_least_d_argument_s_g, sizeof(__pyx_k_Expected_at_least_d_argument_s_g), 0, 0, 1, 0},
  {&__pyx_kp_s_Function_call_with_ambiguous_arg, __pyx_k_Function_call_with_ambiguous_arg, sizeof(__pyx_k_Function_call_with_ambiguous_arg), 0, 0, 1, 0},
  {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
  {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
  {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
  {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
  {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
  {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
  {&__pyx_kp_s_No_matching_signature_found, __pyx_k_No_matching_signature_found, sizeof(__pyx_k_No_matching_signature_found), 0, 0, 1, 0},
  {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
  {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
  {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
  {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
  {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
  {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
  {&__pyx_kp_s__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 0, 1, 0},
  {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
  {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
  {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
  {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
  {&__pyx_n_s_center_distance_matrix_cy, __pyx_k_center_distance_matrix_cy, sizeof(__pyx_k_center_distance_matrix_cy), 0, 0, 1, 1},
  {&__pyx_n_s_centered, __pyx_k_centered, sizeof(__pyx_k_centered), 0, 0, 1, 1},
  {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
  {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
  {&__pyx_n_s_col, __pyx_k_col, sizeof(__pyx_k_col), 0, 0, 1, 1},
  {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_d2, __pyx_k_d2, sizeof(__pyx_k_d2), 0, 0, 1, 1},
  {&__pyx_n_s_d3, __pyx_k_d3, sizeof(__pyx_k_d3), 0, 0, 1, 1},
  {&__pyx_n_s_d4, __pyx_k_d4, sizeof(__pyx_k_d4), 0, 0, 1, 1},
  {&__pyx_n_s_d5, __pyx_k_d5, sizeof(__pyx_k_d5), 0, 0, 1, 1},
  {&__pyx_n_s_defaults, __pyx_k_defaults, sizeof(__pyx_k_defaults), 0, 0, 1, 1},
  {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
  {&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1},
  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
  {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
  {&__pyx_n_s_dtype_real, __pyx_k_dtype_real, sizeof(__pyx_k_dtype_real), 0, 0, 1, 1},
  {&__pyx_n_s_e_matrix_means_cy, __pyx_k_e_matrix_means_cy, sizeof(__pyx_k_e_matrix_means_cy), 0, 0, 1, 1},
  {&__pyx_n_s_el0, __pyx_k_el0, sizeof(__pyx_k_el0), 0, 0, 1, 1},
  {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
  {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
  {&__pyx_n_s_f_matrix_inplace_cy, __pyx_k_f_matrix_inplace_cy, sizeof(__pyx_k_f_matrix_inplace_cy), 0, 0, 1, 1},
  {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
  {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
  {&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1},
  {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1},
  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
  {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
  {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
  {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
  {&__pyx_n_s_global_mean, __pyx_k_global_mean, sizeof(__pyx_k_global_mean), 0, 0, 1, 1},
  {&__pyx_n_s_global_sum, __pyx_k_global_sum, sizeof(__pyx_k_global_sum), 0, 0, 1, 1},
  {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
  {&__pyx_n_s_gr_mean, __pyx_k_gr_mean, sizeof(__pyx_k_gr_mean), 0, 0, 1, 1},
  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
  {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
  {&__pyx_n_s_kind, __pyx_k_kind, sizeof(__pyx_k_kind), 0, 0, 1, 1},
  {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_mat, __pyx_k_mat, sizeof(__pyx_k_mat), 0, 0, 1, 1},
  {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
  {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
  {&__pyx_n_s_n_samples, __pyx_k_n_samples, sizeof(__pyx_k_n_samples), 0, 0, 1, 1},
  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
  {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
  {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
  {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
  {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
  {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
  {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
  {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
  {&__pyx_n_s_row, __pyx_k_row, sizeof(__pyx_k_row), 0, 0, 1, 1},
  {&__pyx_n_s_row_means, __pyx_k_row_means, sizeof(__pyx_k_row_means), 0, 0, 1, 1},
  {&__pyx_n_s_row_means_np, __pyx_k_row_means_np, sizeof(__pyx_k_row_means_np), 0, 0, 1, 1},
  {&__pyx_n_s_row_sum, __pyx_k_row_sum, sizeof(__pyx_k_row_sum), 0, 0, 1, 1},
  {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1},
  {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
  {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
  {&__pyx_n_s_signatures, __pyx_k_signatures, sizeof(__pyx_k_signatures), 0, 0, 1, 1},
  {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
  {&__pyx_n_s_skbio_stats_ordination__cutils, __pyx_k_skbio_stats_ordination__cutils, sizeof(__pyx_k_skbio_stats_ordination__cutils), 0, 0, 1, 1},
  {&__pyx_kp_s_skbio_stats_ordination__cutils_p, __pyx_k_skbio_stats_ordination__cutils_p, sizeof(__pyx_k_skbio_stats_ordination__cutils_p), 0, 0, 1, 0},
  {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1},
  {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
  {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
  {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
  {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
  {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1},
  {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
  {&__pyx_n_s_tcol, __pyx_k_tcol, sizeof(__pyx_k_tcol), 0, 0, 1, 1},
  {&__pyx_n_s_tcol_max, __pyx_k_tcol_max, sizeof(__pyx_k_tcol_max), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_n_s_trow, __pyx_k_trow, sizeof(__pyx_k_trow), 0, 0, 1, 1},
  {&__pyx_n_s_trow_max, __pyx_k_trow_max, sizeof(__pyx_k_trow_max), 0, 0, 1, 1},
  {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
  {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
  {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
  {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};
/* Look up and cache the Python builtins this module references
 * (TypeError, range, ValueError, ...).  Returns 0 on success, -1 with
 * an exception set on failure.  The __PYX_ERR file/line pairs refer to
 * the original .pyx sources, not this C file. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 23, __pyx_L1_error)
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 23, __pyx_L1_error)
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/*
 * Cython-generated (do not edit by hand): build and cache all constant
 * Python objects the module needs at runtime — error-message argument
 * tuples, the (-1,)*ndim suboffsets tuple, a cached slice(None), the
 * Enum label tuples for the memoryview machinery, and the co_varnames
 * tuples plus code objects for the three _cutils.pyx functions
 * (e_matrix_means_cy, f_matrix_inplace_cy, center_distance_matrix_cy)
 * and __pyx_unpickle_Enum.
 * Returns 0 on success, -1 with a Python exception set on failure; the
 * interleaved comments quote the originating .pyx source locations.
 */
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "skbio/stats/ordination/_cutils.pyx":23
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def e_matrix_means_cy(TReal[:, ::1] mat, TReal[:, ::1] centered, TReal[::1] row_means): # <<<<<<<<<<<<<<
* """
* Compute E matrix from a distance matrix, and
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_No_matching_signature_found); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Function_call_with_ambiguous_arg); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__16 = PyTuple_New(1); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__16, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__19 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__19)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__19);
__Pyx_GIVEREF(__pyx_slice__19);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
/* "skbio/stats/ordination/_cutils.pyx":23
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def e_matrix_means_cy(TReal[:, ::1] mat, TReal[:, ::1] centered, TReal[::1] row_means): # <<<<<<<<<<<<<<
* """
* Compute E matrix from a distance matrix, and
*/
__pyx_tuple__23 = PyTuple_Pack(14, __pyx_n_s_mat, __pyx_n_s_centered, __pyx_n_s_row_means, __pyx_n_s_n_samples, __pyx_n_s_d2, __pyx_n_s_d3, __pyx_n_s_d4, __pyx_n_s_d5, __pyx_n_s_row, __pyx_n_s_col, __pyx_n_s_row_sum, __pyx_n_s_el0, __pyx_n_s_global_sum, __pyx_n_s_global_mean); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
__pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(3, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_skbio_stats_ordination__cutils_p, __pyx_n_s_e_matrix_means_cy, 23, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 23, __pyx_L1_error)
/* "skbio/stats/ordination/_cutils.pyx":81
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def f_matrix_inplace_cy(TReal[::1] row_means, TReal global_mean, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
* """
* Compute F matrix from E matrix inplace.
*/
__pyx_tuple__25 = PyTuple_Pack(13, __pyx_n_s_row_means, __pyx_n_s_global_mean, __pyx_n_s_centered, __pyx_n_s_n_samples, __pyx_n_s_d2, __pyx_n_s_d3, __pyx_n_s_trow, __pyx_n_s_tcol, __pyx_n_s_row, __pyx_n_s_col, __pyx_n_s_trow_max, __pyx_n_s_tcol_max, __pyx_n_s_gr_mean); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 13, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_skbio_stats_ordination__cutils_p, __pyx_n_s_f_matrix_inplace_cy, 81, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 81, __pyx_L1_error)
/* "skbio/stats/ordination/_cutils.pyx":127
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def center_distance_matrix_cy(TReal[:, ::1] mat, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
* """
* Centers a distance matrix.
*/
__pyx_tuple__27 = PyTuple_Pack(10, __pyx_n_s_mat, __pyx_n_s_centered, __pyx_n_s_n_samples, __pyx_n_s_d2, __pyx_n_s_d3, __pyx_n_s_d4, __pyx_n_s_global_mean, __pyx_n_s_dtype_real, __pyx_n_s_row_means_np, __pyx_n_s_row_means); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
__pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(2, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_skbio_stats_ordination__cutils_p, __pyx_n_s_center_distance_matrix_cy, 127, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 127, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__32);
__Pyx_GIVEREF(__pyx_tuple__32);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__33 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__33);
__Pyx_GIVEREF(__pyx_tuple__33);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__34 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__34);
__Pyx_GIVEREF(__pyx_tuple__34);
__pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/*
 * Cython-generated (do not edit by hand): one-time global setup — init
 * threads (on Pythons that still need PyEval_InitThreads), intern the
 * module's string table, and create the small cached int objects
 * (0, 1, 2, 3, -1, and the pickle-checksum constant 184977713).
 * Returns 0 on success, -1 with a Python exception set on failure.
 */
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
/*
 * Cython-generated (do not edit by hand): pre-seed the five memoryview
 * Enum globals (generic/strided/indirect/contiguous/indirect_contiguous)
 * with owned references to Py_None; the real Enum objects are assigned
 * later during module execution. Always returns 0.
 */
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
/*
 * Cython-generated (do not edit by hand): variable-export hook; this
 * module exports no C variables, so it is an empty stub returning 0.
 */
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/*
 * Cython-generated (do not edit by hand): function-export hook; this
 * module exports no C functions, so it is an empty stub returning 0.
 */
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/*
 * Cython-generated (do not edit by hand): readies the four internal
 * extension types (cython.array, the memoryview Enum, memoryview, and
 * _memoryviewslice which subclasses memoryview), wires each type's
 * vtable of C-level methods, zeroes tp_print on pre-3.8 Pythons, and
 * installs __reduce__ support via __Pyx_setup_reduce.
 * Returns 0 on success, -1 with a Python exception set on failure.
 */
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/*
 * Cython-generated (do not edit by hand): type-import hook; this module
 * imports no external extension types, so it is an empty stub returning 0.
 */
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/*
 * Cython-generated (do not edit by hand): variable-import hook; this
 * module imports no C variables, so it is an empty stub returning 0.
 */
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/*
 * Cython-generated (do not edit by hand): function-import hook; this
 * module imports no C functions, so it is an empty stub returning 0.
 */
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC init_cutils(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC init_cutils(void)
#else
__Pyx_PyMODINIT_FUNC PyInit__cutils(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit__cutils(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
/*
 * Cython-generated (do not edit by hand), PEP 489 path only: records the
 * interpreter at first import (by id on 3.7+, by pointer on older
 * Pythons) and rejects a subsequent import from a different
 * sub-interpreter, since this module keeps process-global state.
 * Returns 0 if ok, -1 with ImportError set on interpreter change (or if
 * the interpreter id could not be obtained on first call).
 */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
/*
 * Cython-generated (do not edit by hand), PEP 489 path only: copy one
 * attribute from the import ModuleSpec into the module dict under a new
 * name (e.g. spec.origin -> __file__). A missing attribute is not an
 * error (AttributeError is cleared); a None value is skipped unless
 * allow_none is set. Returns 0 on success/skip, -1 on failure.
 */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
/*
 * Cython-generated (do not edit by hand): PEP 489 Py_mod_create slot.
 * Enforces the single-interpreter restriction, returns the existing
 * module object on re-import, otherwise creates a fresh module named
 * after the spec and copies spec.loader/origin/parent/
 * submodule_search_locations into __loader__/__file__/__package__/
 * __path__. Returns a new reference, or NULL with an exception set.
 */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec__cutils(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
static PyThread_type_lock __pyx_t_3[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module '_cutils' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__cutils(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("_cutils", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_skbio__stats__ordination___cutils) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "skbio.stats.ordination._cutils")) {
if (unlikely(PyDict_SetItemString(modules, "skbio.stats.ordination._cutils", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "skbio/stats/ordination/_cutils.pyx":12
* # distutils: extra_link_args=-fopenmp
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport cython
* from cython.parallel import prange
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "skbio/stats/ordination/_cutils.pyx":23
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def e_matrix_means_cy(TReal[:, ::1] mat, TReal[:, ::1] centered, TReal[::1] row_means): # <<<<<<<<<<<<<<
* """
* Compute E matrix from a distance matrix, and
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_FusedFunction_New(&__pyx_fuse_0__pyx_mdef_5skbio_5stats_10ordination_7_cutils_7e_matrix_means_cy, 0, __pyx_n_s_e_matrix_means_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_float, __pyx_t_2) < 0) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_FusedFunction_New(&__pyx_fuse_1__pyx_mdef_5skbio_5stats_10ordination_7_cutils_9e_matrix_means_cy, 0, __pyx_n_s_e_matrix_means_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_double, __pyx_t_2) < 0) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_FusedFunction_New(&__pyx_mdef_5skbio_5stats_10ordination_7_cutils_1e_matrix_means_cy, 0, __pyx_n_s_e_matrix_means_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
((__pyx_FusedFunctionObject *) __pyx_t_2)->__signatures__ = __pyx_t_1;
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_e_matrix_means_cy, __pyx_t_2) < 0) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "skbio/stats/ordination/_cutils.pyx":81
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def f_matrix_inplace_cy(TReal[::1] row_means, TReal global_mean, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
* """
* Compute F matrix from E matrix inplace.
*/
__pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __pyx_FusedFunction_New(&__pyx_fuse_0__pyx_mdef_5skbio_5stats_10ordination_7_cutils_13f_matrix_inplace_cy, 0, __pyx_n_s_f_matrix_inplace_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_float, __pyx_t_1) < 0) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __pyx_FusedFunction_New(&__pyx_fuse_1__pyx_mdef_5skbio_5stats_10ordination_7_cutils_15f_matrix_inplace_cy, 0, __pyx_n_s_f_matrix_inplace_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_double, __pyx_t_1) < 0) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __pyx_FusedFunction_New(&__pyx_mdef_5skbio_5stats_10ordination_7_cutils_3f_matrix_inplace_cy, 0, __pyx_n_s_f_matrix_inplace_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_empty_tuple);
((__pyx_FusedFunctionObject *) __pyx_t_1)->__signatures__ = __pyx_t_2;
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_f_matrix_inplace_cy, __pyx_t_1) < 0) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "skbio/stats/ordination/_cutils.pyx":127
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def center_distance_matrix_cy(TReal[:, ::1] mat, TReal[:, ::1] centered): # <<<<<<<<<<<<<<
* """
* Centers a distance matrix.
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_FusedFunction_New(&__pyx_fuse_0__pyx_mdef_5skbio_5stats_10ordination_7_cutils_19center_distance_matrix_cy, 0, __pyx_n_s_center_distance_matrix_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_float, __pyx_t_2) < 0) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_FusedFunction_New(&__pyx_fuse_1__pyx_mdef_5skbio_5stats_10ordination_7_cutils_21center_distance_matrix_cy, 0, __pyx_n_s_center_distance_matrix_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_double, __pyx_t_2) < 0) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_FusedFunction_New(&__pyx_mdef_5skbio_5stats_10ordination_7_cutils_5center_distance_matrix_cy, 0, __pyx_n_s_center_distance_matrix_cy, NULL, __pyx_n_s_skbio_stats_ordination__cutils, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
((__pyx_FusedFunctionObject *) __pyx_t_2)->__signatures__ = __pyx_t_1;
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_center_distance_matrix_cy, __pyx_t_2) < 0) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "skbio/stats/ordination/_cutils.pyx":1
* # ----------------------------------------------------------------------------- # <<<<<<<<<<<<<<
* # Copyright (c) 2013--, scikit-bio development team.
* #
*/
__pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_3[0] = PyThread_allocate_lock();
__pyx_t_3[1] = PyThread_allocate_lock();
__pyx_t_3[2] = PyThread_allocate_lock();
__pyx_t_3[3] = PyThread_allocate_lock();
__pyx_t_3[4] = PyThread_allocate_lock();
__pyx_t_3[5] = PyThread_allocate_lock();
__pyx_t_3[6] = PyThread_allocate_lock();
__pyx_t_3[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init skbio.stats.ordination._cutils", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init skbio.stats.ordination._cutils");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Import the "RefNannyAPI" capsule-style pointer from the given module
 * (Cython's refnanny debugging helper).  Returns NULL on any failure;
 * the import error, if any, is left set for the caller to handle. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule(modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, "RefNannyAPI");
    if (!p) goto end;
    r = PyLong_AsVoidPtr(p);
end:
    /* goto-based cleanup: both refs may be NULL here, Py_XDECREF is safe. */
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
/* Fast attribute lookup: call the type's tp_getattro (or legacy
 * tp_getattr on Python 2) slot directly when available, bypassing the
 * generic PyObject_GetAttr dispatch.  Returns a new reference or NULL
 * with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    if (likely(tp->tp_getattr))
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    /* Slot-less fallback (rare). */
    return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
/* Look up 'name' in the builtins module (__pyx_b).  On failure, replace
 * the AttributeError with the NameError CPython itself would raise for
 * an undefined global.  Returns a new reference or NULL. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}
/* RaiseArgTupleInvalid */
/* Raise the standard TypeError for a wrong number of positional
 * arguments, mimicking CPython's message ("takes exactly/at least/at
 * most N positional argument(s) (M given)"). */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        /* exact overrides the min/max wording chosen above */
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
/* Raise TypeError for a keyword argument that duplicates a value that
 * was already passed positionally (or twice as a keyword). */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        #if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
        #else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
        #endif
}
/* ParseKeywords */
/* Match the keywords in 'kwds' against the expected argument names in
 * 'argnames' (a NULL-terminated array; the first 'num_pos_args' entries
 * correspond to arguments already filled positionally).  Matched values
 * are stored into 'values' at the name's index; unmatched keywords go
 * into 'kwds2' when non-NULL (i.e. the function accepts **kwargs),
 * otherwise they raise TypeError.  Returns 0 on success, -1 with an
 * exception set on error.  Note: 'values' receives borrowed references. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        name = first_kw_arg;
        /* Fast path: argument-name strings are interned, so identity
         * comparison usually succeeds without a string compare. */
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        name = first_kw_arg;
        #if PY_MAJOR_VERSION < 3
        if (likely(PyString_Check(key))) {
            /* Slow path, Py2 str keys: compare by length then content. */
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Not a keyword-capable name: see whether it duplicates
                 * one of the already-filled positional arguments. */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
        #endif
        if (likely(PyUnicode_Check(key))) {
            /* Slow path, unicode keys: identity, then length, then
             * PyUnicode_Compare (which can fail, hence the errcheck). */
            while (*name) {
                int cmp = (**name == key) ? 0 :
                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
                #endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Check for a duplicate of a positional argument. */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
                    #endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            /* Function takes **kwargs: collect the unknown keyword. */
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
    #if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
    #else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    #endif
bad:
    return -1;
}
/* DictGetItem */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
/* dict[key] with CPython d[k] semantics: on a missing key raise
 * KeyError(key), wrapping tuple keys in a 1-tuple so they are not
 * unpacked as multiple exception args.  Returns a NEW reference
 * (unlike PyDict_GetItemWithError, which returns a borrowed one). */
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
    PyObject *value;
    value = PyDict_GetItemWithError(d, key);
    if (unlikely(!value)) {
        if (!PyErr_Occurred()) {
            if (unlikely(PyTuple_Check(key))) {
                /* Wrap tuple keys so KeyError carries them as one arg. */
                PyObject* args = PyTuple_Pack(1, key);
                if (likely(args)) {
                    PyErr_SetObject(PyExc_KeyError, args);
                    Py_DECREF(args);
                }
            } else {
                PyErr_SetObject(PyExc_KeyError, key);
            }
        }
        return NULL;
    }
    Py_INCREF(value);
    return value;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
/* Invoke a built-in (PyCFunction) via the METH_FASTCALL convention:
 * a C array of args instead of an argument tuple.  Caller must ensure
 * no exception is currently set.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    int flags = PyCFunction_GET_FLAGS(func);
    assert(PyCFunction_Check(func));
    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
    assert(nargs >= 0);
    assert(nargs == 0 || args != NULL);
    /* _PyCFunction_FastCallDict() must not be called with an exception set,
       because it may clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
        /* Pre-3.7 (or METH_KEYWORDS): signature takes a kwnames arg. */
        return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
    } else {
        return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
    }
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
/* Call pure-Python code by building a frame directly and copying the
 * positional args into its fastlocals, skipping argument-tuple
 * construction.  Only valid for simple functions (no kwargs/defaults
 * handled here).  Returns a new reference or NULL. */
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
                                               PyObject *globals) {
    PyFrameObject *f;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject **fastlocals;
    Py_ssize_t i;
    PyObject *result;
    assert(globals != NULL);
    /* XXX Perhaps we should create a specialized
       PyFrame_New() that doesn't take locals, but does
       take builtins without sanity checking them.
       */
    assert(tstate != NULL);
    f = PyFrame_New(tstate, co, globals, NULL);
    if (f == NULL) {
        return NULL;
    }
    fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
    for (i = 0; i < na; i++) {
        /* Frame takes ownership of each argument reference. */
        Py_INCREF(*args);
        fastlocals[i] = *args++;
    }
    result = PyEval_EvalFrameEx(f,0);
    /* Bump recursion depth around the frame DECREF so frame teardown
     * cannot trip the recursion limit (mirrors CPython's fast-call). */
    ++tstate->recursion_depth;
    Py_DECREF(f);
    --tstate->recursion_depth;
    return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
/* Call a Python function with a C array of positional args and an
 * optional kwargs dict.  Takes the no-frills frame fast path when the
 * code object is simple (no kwonly args, no kwargs, no free vars, and
 * either an exact positional match or all-defaults); otherwise flattens
 * kwargs into a key/value tuple and falls back to PyEval_EvalCodeEx.
 * Returns a new reference or NULL with an exception set. */
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *closure;
#if PY_MAJOR_VERSION >= 3
    PyObject *kwdefs;
#endif
    PyObject *kwtuple, **k;
    PyObject **d;
    Py_ssize_t nd;
    Py_ssize_t nk;
    PyObject *result;
    assert(kwargs == NULL || PyDict_Check(kwargs));
    nk = kwargs ? PyDict_Size(kwargs) : 0;
    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
        return NULL;
    }
    if (
#if PY_MAJOR_VERSION >= 3
            co->co_kwonlyargcount == 0 &&
#endif
            likely(kwargs == NULL || nk == 0) &&
            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        /* Fast path: plain function, positional-only call. */
        if (argdefs == NULL && co->co_argcount == nargs) {
            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
            goto done;
        }
        else if (nargs == 0 && argdefs != NULL
                 && co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments .*/
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
            goto done;
        }
    }
    if (kwargs != NULL) {
        Py_ssize_t pos, i;
        /* Flatten the kwargs dict to [key0, val0, key1, val1, ...] as
         * expected by PyEval_EvalCodeEx. */
        kwtuple = PyTuple_New(2 * nk);
        if (kwtuple == NULL) {
            result = NULL;
            goto done;
        }
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i / 2;
    }
    else {
        kwtuple = NULL;
        k = NULL;
    }
    closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    else {
        d = NULL;
        nd = 0;
    }
#if PY_MAJOR_VERSION >= 3
    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
                               args, (int)nargs,
                               k, (int)nk,
                               d, (int)nd, kwdefs, closure);
#else
    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                               args, (int)nargs,
                               k, (int)nk,
                               d, (int)nd, closure);
#endif
    Py_XDECREF(kwtuple);
done:
    Py_LeaveRecursiveCall();
    return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
/* PyObject_Call equivalent that dispatches straight through tp_call,
 * with recursion-limit protection and a guard against buggy callables
 * that return NULL without setting an exception.  Returns a new
 * reference or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        /* Not callable via slot: let PyObject_Call raise the TypeError. */
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call a METH_O built-in directly with its single argument, skipping
 * argument-tuple packing.  Same recursion and NULL-result guards as
 * __Pyx_PyObject_Call.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Generic one-argument call: pack 'arg' into a 1-tuple and dispatch
 * through __Pyx_PyObject_Call.  Returns a new reference or NULL. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
/* Call func(arg), picking the fastest available calling convention:
 * Python-function fast call, METH_O, METH_FASTCALL, then the generic
 * tuple-packing fallback above. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if (likely(PyCFunction_Check(func))) {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython build: plain tuple-packing call, no fast paths. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* PyErr_Restore equivalent acting directly on a given thread state:
 * installs (type, value, tb), STEALING those references, and releases
 * whatever exception was previously set.  Old refs are dropped only
 * after the new ones are in place. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* PyErr_Fetch equivalent: move the current exception (ownership and
 * all) out of the thread state into *type/*value/*tb and clear it. */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of the 'raise' statement: normalizes the
 * (type, value, tb) triple and installs it as the current exception.
 * 'cause' is unused on Py2 (no 'raise ... from ...').  Reference
 * handling mirrors CPython's ceval raise logic. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
        /* 'raise SomeClass[, value]': let normalization instantiate. */
#if CYTHON_COMPILING_IN_PYPY
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* 'raise instance': type must come from the instance itself. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    /* ErrRestore steals our references to type/value/tb. */
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Python 3 implementation of the 'raise' statement, including
 * 'raise ... from cause'.  Accepts an exception class (optionally with
 * a value to instantiate it) or an instance, attaches __cause__, sets
 * the exception, and splices in an explicit traceback if given.
 * Operates on borrowed references except 'owned_instance'. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* 'raise instance': no separate value allowed. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        /* 'raise Class' or 'raise Class(value)': instantiate if needed. */
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    /* The value's own (more derived) class wins. */
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            /* Build the constructor args tuple and instantiate. */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        /* 'raise ... from cause': normalize cause and set __cause__. */
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        /* PyException_SetCause steals the fixed_cause reference. */
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* Attach the explicit traceback to the now-current exception. */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        PyThreadState *tstate = __Pyx_PyThreadState_Current;
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* UnicodeAsUCS4 */
/* Convert a length-1 unicode string to its Py_UCS4 code point.  On
 * narrow (UCS-2) pre-PEP-393 builds, also accepts a valid surrogate
 * pair of length 2.  Anything else raises ValueError and returns
 * (Py_UCS4)-1. */
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) {
   Py_ssize_t length;
   #if CYTHON_PEP393_ENABLED
   length = PyUnicode_GET_LENGTH(x);
   if (likely(length == 1)) {
       return PyUnicode_READ_CHAR(x, 0);
   }
   #else
   length = PyUnicode_GET_SIZE(x);
   if (likely(length == 1)) {
       return PyUnicode_AS_UNICODE(x)[0];
   }
   #if Py_UNICODE_SIZE == 2
   else if (PyUnicode_GET_SIZE(x) == 2) {
       /* Narrow build: decode a high/low surrogate pair manually. */
       Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0];
       if (high_val >= 0xD800 && high_val <= 0xDBFF) {
           Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1];
           if (low_val >= 0xDC00 && low_val <= 0xDFFF) {
               return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1)));
           }
       }
   }
   #endif
   #endif
   PyErr_Format(PyExc_ValueError,
                "only single character unicode strings can be converted to Py_UCS4, "
                "got length %" CYTHON_FORMAT_SSIZE_T "d", length);
   return (Py_UCS4)-1;
}
/* object_ord */
/* Implementation of ord(c) for non-unicode inputs: accepts a length-1
 * bytes, bytearray, or (Py2) str/unicode object and returns its code
 * point as a long.  Raises TypeError and returns (long)(Py_UCS4)-1 on
 * wrong type or wrong length. */
static long __Pyx__PyObject_Ord(PyObject* c) {
    Py_ssize_t size;
    if (PyBytes_Check(c)) {
        size = PyBytes_GET_SIZE(c);
        if (likely(size == 1)) {
            /* Cast to unsigned char: raw byte value, not sign-extended. */
            return (unsigned char) PyBytes_AS_STRING(c)[0];
        }
#if PY_MAJOR_VERSION < 3
    } else if (PyUnicode_Check(c)) {
        return (long)__Pyx_PyUnicode_AsPy_UCS4(c);
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    } else if (PyByteArray_Check(c)) {
        size = PyByteArray_GET_SIZE(c);
        if (likely(size == 1)) {
            return (unsigned char) PyByteArray_AS_STRING(c)[0];
        }
#endif
    } else {
        PyErr_Format(PyExc_TypeError,
            "ord() expected string of length 1, but %.200s found", c->ob_type->tp_name);
        return (long)(Py_UCS4)-1;
    }
    /* Right type but wrong length: report the actual size. */
    PyErr_Format(PyExc_TypeError,
        "ord() expected a character, but string of length %zd found", size);
    return (long)(Py_UCS4)-1;
}
/* SetItemInt */
/* Generic o[j] = v fallback taking ownership of the boxed index 'j'
 * (may be NULL if boxing failed).  Returns the PyObject_SetItem result,
 * or -1 on a NULL index. */
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
    int r;
    if (!j) return -1;
    r = PyObject_SetItem(o, j, v);
    Py_DECREF(j);
    return r;
}
/* o[i] = v with a C integer index.  Fast paths: direct PyList item
 * swap (with optional wraparound/bounds handling compiled in via the
 * flags), then the type's sq_ass_item slot; otherwise box the index
 * and go through the generic setter.  NOTE: the list fast path returns
 * 1 on success while the other paths return 0/-1 — callers only test
 * for < 0. */
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list,
                                               CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        /* Normalize a negative index if wraparound is enabled. */
        Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) {
            PyObject* old = PyList_GET_ITEM(o, n);
            Py_INCREF(v);
            PyList_SET_ITEM(o, n, v);
            Py_DECREF(old);
            return 1;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_ass_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* sq_length failed: only OverflowError is tolerated
                     * (huge sequence), anything else propagates. */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return -1;
                    PyErr_Clear();
                }
            }
            return m->sq_ass_item(o, i, v);
        }
    }
#else
#if CYTHON_COMPILING_IN_PYPY
    if (is_list || (PySequence_Check(o) && !PyDict_Check(o)))
#else
    if (is_list || PySequence_Check(o))
#endif
    {
        return PySequence_SetItem(o, i, v);
    }
#endif
    /* Fallback: box the index and use the generic protocol. */
    return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
}
/* IterFinish */
/* Called after an iterator is exhausted: swallow a pending StopIteration.
 * Returns 0 if no exception was set or StopIteration was cleared,
 * -1 if some other exception is pending (left in place for the caller). */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
    /* Fast path: inspect and clear the thread state's current exception
       fields directly instead of going through PyErr_Fetch/Clear. */
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject* exc_type = tstate->curexc_type;
    if (unlikely(exc_type)) {
        if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
            PyObject *exc_value, *exc_tb;
            exc_value = tstate->curexc_value;
            exc_tb = tstate->curexc_traceback;
            tstate->curexc_type = 0;
            tstate->curexc_value = 0;
            tstate->curexc_traceback = 0;
            Py_DECREF(exc_type);
            Py_XDECREF(exc_value);
            Py_XDECREF(exc_tb);
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#else
    if (unlikely(PyErr_Occurred())) {
        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
            PyErr_Clear();
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#endif
}
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call 'func' with no arguments, using the fastest available protocol:
 * vectorcall/fastcall for plain Python functions, METH_NOARGS dispatch for
 * C functions, and a generic empty-tuple call otherwise. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, NULL, 0);
    }
#endif
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
    if (likely(PyCFunction_Check(func)))
#endif
    {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
            return __Pyx_PyObject_CallMethO(func, NULL);
        }
    }
    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyObjectGetMethod */
/* Look up 'name' on 'obj' for a method call, avoiding bound-method creation
 * when possible (mirrors CPython's _PyObject_GetMethod / LOAD_METHOD).
 * On success stores the callable in *method and returns:
 *   1 -- *method is an unbound function; the caller must pass 'obj' as the
 *        first argument ("self") itself;
 *   0 -- *method is an ordinary attribute (possibly a bound method), or an
 *        error occurred (*method left NULL with an exception set). */
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
    PyObject *attr;
#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
    PyTypeObject *tp = Py_TYPE(obj);
    PyObject *descr;
    descrgetfunc f = NULL;
    PyObject **dictptr, *dict;
    int meth_found = 0;
    assert (*method == NULL);
    /* Custom tp_getattro means we cannot replicate the generic lookup order;
       fall back to a plain attribute fetch. */
    if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
        attr = __Pyx_PyObject_GetAttrStr(obj, name);
        goto try_unpack;
    }
    if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
        return 0;
    }
    descr = _PyType_Lookup(tp, name);
    if (likely(descr != NULL)) {
        Py_INCREF(descr);
        /* Plain functions (and method descriptors on Py3) can be returned
           unbound; data descriptors take priority over the instance dict. */
#if PY_MAJOR_VERSION >= 3
        #ifdef __Pyx_CyFunction_USED
        if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
        #else
        if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
        #endif
#else
        #ifdef __Pyx_CyFunction_USED
        if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
        #else
        if (likely(PyFunction_Check(descr)))
        #endif
#endif
        {
            meth_found = 1;
        } else {
            f = Py_TYPE(descr)->tp_descr_get;
            if (f != NULL && PyDescr_IsData(descr)) {
                /* Data descriptor wins over the instance dict: invoke it now. */
                attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
                Py_DECREF(descr);
                goto try_unpack;
            }
        }
    }
    /* Instance __dict__ shadows non-data descriptors. */
    dictptr = _PyObject_GetDictPtr(obj);
    if (dictptr != NULL && (dict = *dictptr) != NULL) {
        Py_INCREF(dict);
        attr = __Pyx_PyDict_GetItemStr(dict, name);
        if (attr != NULL) {
            Py_INCREF(attr);
            Py_DECREF(dict);
            Py_XDECREF(descr);
            goto try_unpack;
        }
        Py_DECREF(dict);
    }
    if (meth_found) {
        *method = descr;
        return 1;
    }
    /* Non-data descriptor: invoke its __get__ now. */
    if (f != NULL) {
        attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
        Py_DECREF(descr);
        goto try_unpack;
    }
    if (descr != NULL) {
        *method = descr;
        return 0;
    }
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(name));
#endif
    return 0;
#else
    attr = __Pyx_PyObject_GetAttrStr(obj, name);
    goto try_unpack;
#endif
try_unpack:
#if CYTHON_UNPACK_METHODS
    /* If we got a bound method wrapping 'obj', unwrap it to the underlying
       function and report it as unbound (return 1). */
    if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
        PyObject *function = PyMethod_GET_FUNCTION(attr);
        Py_INCREF(function);
        Py_DECREF(attr);
        *method = function;
        return 1;
    }
#endif
    *method = attr;
    return 0;
}
/* PyObjectCallMethod0 */
/* Call obj.method_name() with no arguments.
 * Uses __Pyx_PyObject_GetMethod to avoid creating a bound method where
 * possible: if an unbound function is returned, 'obj' is passed explicitly
 * as the single ("self") argument. Returns a new reference or NULL. */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) {
    PyObject *method = NULL, *result = NULL;
    int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
    if (likely(is_method)) {
        result = __Pyx_PyObject_CallOneArg(method, obj);
        Py_DECREF(method);
        return result;
    }
    if (unlikely(!method)) goto bad;
    result = __Pyx_PyObject_CallNoArg(method);
    Py_DECREF(method);
bad:
    return result;
}
/* RaiseNeedMoreValuesToUnpack */
/* Raise the ValueError used when tuple unpacking receives too few values;
 * 'index' is the number of values that were actually available. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseTooManyValuesToUnpack */
/* Raise the ValueError used when tuple unpacking receives more than
 * 'expected' values. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* UnpackItemEndCheck */
/* After unpacking 'expected' items, check that the iterator is exhausted.
 * 'retval' is the result of one extra tp_iternext call: a non-NULL value
 * means the iterable was too long (ValueError); NULL means either clean
 * exhaustion (StopIteration, cleared by __Pyx_IterFinish) or a real error.
 * Returns 0 on success, -1 with an exception set on failure.
 * (The original trailing 'return 0;' was unreachable -- both branches
 * return -- and has been removed.) */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
    if (unlikely(retval)) {
        Py_DECREF(retval);
        __Pyx_RaiseTooManyValuesError(expected);
        return -1;
    }
    return __Pyx_IterFinish();
}
/* RaiseNoneIterError */
/* Raise the TypeError used when None is unpacked or iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* UnpackTupleError */
/* Raise the appropriate error after a failed unpack of tuple 't' into
 * 'index' targets: TypeError for None, ValueError for a too-short or
 * too-long tuple. */
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
    if (t == Py_None) {
        __Pyx_RaiseNoneNotIterableError();
        return;
    }
    {
        Py_ssize_t size = PyTuple_GET_SIZE(t);
        if (size < index)
            __Pyx_RaiseNeedMoreValuesError(size);
        else
            __Pyx_RaiseTooManyValuesError(index);
    }
}
/* UnpackTuple2 */
/* Unpack a tuple known to have exactly 2 items into *pvalue1/*pvalue2
 * (new references). If 'decref_tuple' is set, the reference to 'tuple' is
 * consumed. Returns 0 on success; -1 only on the PyPy path where item
 * access can fail. */
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
        PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) {
    PyObject *value1 = NULL, *value2 = NULL;
#if CYTHON_COMPILING_IN_PYPY
    /* PyPy: no borrowed-reference macros; PySequence_ITEM may fail. */
    value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad;
    value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad;
#else
    value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1);
    value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2);
#endif
    if (decref_tuple) {
        Py_DECREF(tuple);
    }
    *pvalue1 = value1;
    *pvalue2 = value2;
    return 0;
#if CYTHON_COMPILING_IN_PYPY
bad:
    Py_XDECREF(value1);
    Py_XDECREF(value2);
    if (decref_tuple) { Py_XDECREF(tuple); }
    return -1;
#endif
}
/* Unpack an arbitrary iterable into exactly two values via its iterator.
 * 'has_known_size' skips the extra exhaustion check; 'decref_tuple' consumes
 * the reference to the iterable. On success stores two new references and
 * returns 0; on failure raises and returns -1. */
static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2,
                                       int has_known_size, int decref_tuple) {
    Py_ssize_t index;
    PyObject *value1 = NULL, *value2 = NULL, *iter = NULL;
    iternextfunc iternext;
    iter = PyObject_GetIter(tuple);
    if (unlikely(!iter)) goto bad;
    if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; }
    iternext = Py_TYPE(iter)->tp_iternext;
    value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; }
    value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; }
    /* Unknown size: one more next() must yield nothing, else "too many values". */
    if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad;
    Py_DECREF(iter);
    *pvalue1 = value1;
    *pvalue2 = value2;
    return 0;
unpacking_failed:
    /* A clean StopIteration (IterFinish == 0) means the iterable was too
       short; any other pending exception is propagated as-is. */
    if (!has_known_size && __Pyx_IterFinish() == 0)
        __Pyx_RaiseNeedMoreValuesError(index);
bad:
    Py_XDECREF(iter);
    Py_XDECREF(value1);
    Py_XDECREF(value2);
    if (decref_tuple) { Py_XDECREF(tuple); }
    return -1;
}
/* dict_iter */
/* Set up iteration over a dict (or dict-like iterable).
 * For exact dicts on CPython, returns the dict itself (new ref) and records
 * its size in *p_orig_length so PyDict_Next can be used with a mutation
 * check. Otherwise calls 'method_name' (e.g. "items") if given, and returns
 * the resulting sequence or a fresh iterator. *p_source_is_dict tells the
 * companion __Pyx_dict_iter_next which protocol to use. */
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name,
                                                   Py_ssize_t* p_orig_length, int* p_source_is_dict) {
    is_dict = is_dict || likely(PyDict_CheckExact(iterable));
    *p_source_is_dict = is_dict;
    if (is_dict) {
#if !CYTHON_COMPILING_IN_PYPY
        *p_orig_length = PyDict_Size(iterable);
        Py_INCREF(iterable);
        return iterable;
#elif PY_MAJOR_VERSION >= 3
        /* PyPy3: map Py2 method names (iteritems/...) to their Py3
           equivalents (items/...), caching the interned name objects. */
        static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL;
        PyObject **pp = NULL;
        if (method_name) {
            const char *name = PyUnicode_AsUTF8(method_name);
            if (strcmp(name, "iteritems") == 0) pp = &py_items;
            else if (strcmp(name, "iterkeys") == 0) pp = &py_keys;
            else if (strcmp(name, "itervalues") == 0) pp = &py_values;
            if (pp) {
                if (!*pp) {
                    *pp = PyUnicode_FromString(name + 4);
                    if (!*pp)
                        return NULL;
                }
                method_name = *pp;
            }
        }
#endif
    }
    *p_orig_length = 0;
    if (method_name) {
        PyObject* iter;
        iterable = __Pyx_PyObject_CallMethod0(iterable, method_name);
        if (!iterable)
            return NULL;
#if !CYTHON_COMPILING_IN_PYPY
        /* Tuples/lists are iterated by index in __Pyx_dict_iter_next. */
        if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable))
            return iterable;
#endif
        iter = PyObject_GetIter(iterable);
        Py_DECREF(iterable);
        return iter;
    }
    return PyObject_GetIter(iterable);
}
/* Advance one step of an iteration prepared by __Pyx_dict_iterator.
 * Fills whichever of *pkey / *pvalue / *pitem are non-NULL with new
 * references. Returns 1 on a produced item, 0 on exhaustion, -1 on error
 * (including dict-size change during direct dict iteration). */
static CYTHON_INLINE int __Pyx_dict_iter_next(
        PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos,
        PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) {
    PyObject* next_item;
#if !CYTHON_COMPILING_IN_PYPY
    if (source_is_dict) {
        PyObject *key, *value;
        /* Same guard CPython's dict iterator uses. */
        if (unlikely(orig_length != PyDict_Size(iter_obj))) {
            PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration");
            return -1;
        }
        if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) {
            return 0;
        }
        if (pitem) {
            /* items() style: build a (key, value) tuple; SET_ITEM steals the
               references we just added. */
            PyObject* tuple = PyTuple_New(2);
            if (unlikely(!tuple)) {
                return -1;
            }
            Py_INCREF(key);
            Py_INCREF(value);
            PyTuple_SET_ITEM(tuple, 0, key);
            PyTuple_SET_ITEM(tuple, 1, value);
            *pitem = tuple;
        } else {
            /* PyDict_Next yields borrowed refs; own them before returning. */
            if (pkey) {
                Py_INCREF(key);
                *pkey = key;
            }
            if (pvalue) {
                Py_INCREF(value);
                *pvalue = value;
            }
        }
        return 1;
    } else if (PyTuple_CheckExact(iter_obj)) {
        Py_ssize_t pos = *ppos;
        if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0;
        *ppos = pos + 1;
        next_item = PyTuple_GET_ITEM(iter_obj, pos);
        Py_INCREF(next_item);
    } else if (PyList_CheckExact(iter_obj)) {
        Py_ssize_t pos = *ppos;
        if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0;
        *ppos = pos + 1;
        next_item = PyList_GET_ITEM(iter_obj, pos);
        Py_INCREF(next_item);
    } else
#endif
    {
        next_item = PyIter_Next(iter_obj);
        if (unlikely(!next_item)) {
            return __Pyx_IterFinish();
        }
    }
    if (pitem) {
        *pitem = next_item;
    } else if (pkey && pvalue) {
        /* Item is a 2-tuple from items()/iteritems(); unpack it.
           The final argument consumes our reference to next_item. */
        if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1))
            return -1;
    } else if (pkey) {
        *pkey = next_item;
    } else {
        *pvalue = next_item;
    }
    return 1;
}
/* PyObjectCall2Args */
/* Call 'function' with exactly two positional arguments.
 * Uses the fastcall protocols when available (note: the inner 'args' arrays
 * intentionally shadow the outer tuple variable on those paths); otherwise
 * packs a 2-tuple. Returns a new reference or NULL. */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
    PyObject *args, *result = NULL;
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyFunction_FastCall(function, args, 2);
    }
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyCFunction_FastCall(function, args, 2);
    }
    #endif
    args = PyTuple_New(2);
    if (unlikely(!args)) goto done;
    /* PyTuple_SET_ITEM steals, so take explicit references first. */
    Py_INCREF(arg1);
    PyTuple_SET_ITEM(args, 0, arg1);
    Py_INCREF(arg2);
    PyTuple_SET_ITEM(args, 1, arg2);
    Py_INCREF(function);
    result = __Pyx_PyObject_Call(function, args, NULL);
    Py_DECREF(args);
    Py_DECREF(function);
done:
    return result;
}
/* GetItemInt */
/* Generic fallback for o[j]. Steals the reference to 'j' (which may be NULL
 * if creating the index object failed) and returns a new reference or NULL. */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *result = NULL;
    if (j) {
        result = PyObject_GetItem(o, j);
        Py_DECREF(j);
    }
    return result;
}
/* Fast o[i] for objects known to be lists. Returns a new reference or NULL.
 * Note the deliberate branchless '&' (not '&&') combining the compile-time
 * 'wraparound' flag with the runtime sign test. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                          CYTHON_NCP_UNUSED int wraparound,
                                                          CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    /* Out of bounds: let the generic path raise IndexError. */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Fast o[i] for objects known to be tuples; mirrors the list variant above.
 * Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                           CYTHON_NCP_UNUSED int wraparound,
                                                           CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    /* Out of bounds: let the generic path raise IndexError. */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* General fast o[i] with a C integer index: list/tuple macros first, then
 * the sq_item slot, then the fully generic path. Returns a new reference or
 * NULL. The '(!wraparound) | likely(i >= 0)' forms are deliberate branchless
 * combinations of a compile-time flag with a runtime test. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* Only an OverflowError from sq_length is tolerated; the
                       raw negative index is then passed to sq_item as-is. */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* MemviewSliceInit */
/* Populate a __Pyx_memviewslice from the Py_buffer held by 'memview':
 * copies strides (synthesizing C-contiguous strides when the buffer has
 * none), shape, and suboffsets (-1 when absent), and bumps the memoryview's
 * acquisition count. Returns 0 on success, -1 on error (slice zeroed). */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (unlikely(memviewslice->memview || memviewslice->data)) {
        PyErr_SetString(PyExc_ValueError,
                        "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* No stride info: assume C-contiguous and derive strides from the
           shape, innermost dimension first. */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i] = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    /* First acquisition by an existing reference keeps the memoryview alive. */
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
/* printf-style wrapper around Py_FatalError: formats into a fixed 200-byte
 * buffer (truncating) and aborts the process. Never returns. */
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
    va_list vargs;
    char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    vsnprintf(msg, 200, fmt, vargs);
    va_end(vargs);
    Py_FatalError(msg);
}
/* Lock-protected fallback for atomically incrementing a memoryview slice
 * acquisition counter; returns the value it held before the increment. */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int old_count;
    PyThread_acquire_lock(lock, 1);
    old_count = *acquisition_count;
    *acquisition_count = old_count + 1;
    PyThread_release_lock(lock);
    return old_count;
}
/* Lock-protected fallback for atomically decrementing a memoryview slice
 * acquisition counter; returns the value it held before the decrement. */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int old_count;
    PyThread_acquire_lock(lock, 1);
    old_count = *acquisition_count;
    *acquisition_count = old_count - 1;
    PyThread_release_lock(lock);
    return old_count;
}
/* Acquire a reference to the memoryview backing 'memslice'.
 * The first acquisition also takes a Python reference to the memoryview,
 * re-acquiring the GIL if the caller does not hold it. No-op for NULL/None
 * memviews; a negative acquisition count is a fatal internal error. */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (unlikely(!memview || (PyObject *) memview == Py_None))
        return;
    if (unlikely(__pyx_get_slice_count(memview) < 0))
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (unlikely(first_time)) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Release a reference to the memoryview backing 'memslice' and clear the
 * slice. The last release also drops the Python reference (taking the GIL
 * if needed). Safe to call on NULL/None memviews; a non-positive count is a
 * fatal internal error. */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (unlikely(!memview || (PyObject *) memview == Py_None)) {
        memslice->memview = NULL;
        return;
    }
    if (unlikely(__pyx_get_slice_count(memview) <= 0))
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (unlikely(last_time)) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
/* Version tag of a type's tp_dict (0 if absent) -- used to cache attribute
 * lookups across calls. */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
/* Version tag of an instance's __dict__ (0 if the object has none). */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
    PyObject **dictptr = NULL;
    Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
    if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
        /* Negative offsets (variable-size objects) need the helper. */
        dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
        dictptr = _PyObject_GetDictPtr(obj);
#endif
    }
    return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
/* True if both the type dict and the instance dict still match the cached
 * version tags, i.e. a cached attribute lookup is still valid. */
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
        return 0;
    return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
/* Look up 'name' in the module dict __pyx_d, falling back to builtins.
 * With dict versioning enabled, the caller's cache slots are refreshed via
 * __PYX_UPDATE_DICT_CACHE. Returns a new reference or NULL. */
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
    PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
    /* Reuse the hash already stored on the interned name object. */
    result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    } else if (unlikely(PyErr_Occurred())) {
        return NULL;
    }
#else
    result = PyDict_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
#endif
#else
    result = PyObject_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
    PyErr_Clear();
#endif
    /* Not a module global: try builtins (raises NameError-style on failure). */
    return __Pyx_GetBuiltinName(name);
}
/* ArgTypeTest */
/* Check that argument 'obj' has the expected type; on mismatch sets a
 * TypeError naming the argument and returns 0, else returns 1.
 * With 'exact', only the Py2 basestring special case is accepted here --
 * presumably the generated caller has already done the exact-type check
 * before invoking this slow path (confirm against the caller macro). */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    else if (exact) {
        #if PY_MAJOR_VERSION == 2
        if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
    }
    PyErr_Format(PyExc_TypeError,
                 "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
                 name, type->tp_name, Py_TYPE(obj)->tp_name);
    return 0;
}
/* BytesEquals */
/* Optimized (in)equality for bytes objects ('equals' is Py_EQ or Py_NE).
 * Returns 1/0 for the comparison result, -1 on error. Fast paths: identity,
 * length, first byte, cached hashes; falls back to rich comparison for
 * non-bytes operands. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result;
#if CYTHON_USE_UNICODE_INTERNALS
            /* Differing cached hashes (-1 means "not computed") prove
               inequality without a memcmp. */
            Py_hash_t hash1, hash2;
            hash1 = ((PyBytesObject*)s1)->ob_shash;
            hash2 = ((PyBytesObject*)s2)->ob_shash;
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                return (equals == Py_NE);
            }
#endif
            result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* UnicodeEquals */
/* Optimized (in)equality for unicode objects ('equals' is Py_EQ or Py_NE).
 * Returns 1/0 for the comparison result, -1 on error. On Py2, a str operand
 * compared against unicode is first decoded to unicode ('owned_ref' tracks
 * that temporary). Fast paths: identity, length, cached hashes, kind, first
 * character; then a single memcmp over the raw character data. */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    /* Py2: coerce a plain str operand to unicode, or delegate str/str
       comparison to the bytes helper. */
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
            return -1;
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
#if CYTHON_USE_UNICODE_INTERNALS
        {
            /* Differing cached hashes (-1 means "not computed") prove
               inequality without touching the character data. */
            Py_hash_t hash1, hash2;
        #if CYTHON_PEP393_ENABLED
            hash1 = ((PyASCIIObject*)s1)->hash;
            hash2 = ((PyASCIIObject*)s2)->hash;
        #else
            hash1 = ((PyUnicodeObject*)s1)->hash;
            hash2 = ((PyUnicodeObject*)s2)->hash;
        #endif
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                goto return_ne;
            }
        }
#endif
        /* Different storage kinds with equal length implies different
           content for canonical PEP 393 strings. */
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            int result = memcmp(data1, data2, (size_t)(length * kind));
            #if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
            #endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        #if PY_MAJOR_VERSION < 3
        Py_XDECREF(owned_ref);
        #endif
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}
/* None */
/* Floor division for Py_ssize_t (Python // semantics). C '/' truncates
 * toward zero, so round the quotient down by one when there is a nonzero
 * remainder whose sign differs from the divisor's. */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
    Py_ssize_t quotient = a / b;
    Py_ssize_t remainder = a - quotient * b;
    if (remainder != 0 && ((remainder < 0) != (b < 0)))
        quotient -= 1;
    return quotient;
}
/* GetAttr */
/* getattr(o, n): use the optimized string-attribute lookup when the name is
 * a str/unicode object, otherwise the generic PyObject_GetAttr.
 * Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
/* Subscript an object that has no mp_subscript: convert 'index' through
 * __index__ and use the sequence protocol. A value of -1 from the
 * conversion is ambiguous, hence the PyErr_Occurred() check. Overflow of
 * the index is reported as IndexError (matching CPython). Returns a new
 * reference or NULL. */
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
    PyObject *runerr;
    Py_ssize_t key_value;
    PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
    if (unlikely(!(m && m->sq_item))) {
        PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
        return NULL;
    }
    key_value = __Pyx_PyIndex_AsSsize_t(index);
    if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
    }
    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
        PyErr_Clear();
        PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
    }
    return NULL;
}
/* obj[key]: prefer the mapping protocol's mp_subscript slot; objects
 * without one fall back to integer-index subscription. Returns a new
 * reference or NULL. */
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
    PyMappingMethods *mapping = Py_TYPE(obj)->tp_as_mapping;
    if (unlikely(!mapping || !mapping->mp_subscript))
        return __Pyx_PyObject_GetIndex(obj, key);
    return mapping->mp_subscript(obj, key);
}
#endif
/* decode_c_string */
/* Decode cstring[start:stop] to a unicode object. Negative start/stop are
 * interpreted Python-style relative to strlen(cstring). Uses 'decode_func'
 * (e.g. PyUnicode_DecodeUTF8) when given, otherwise PyUnicode_Decode with
 * 'encoding'. Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        /* Only compute strlen when a negative bound actually needs it. */
        size_t slen = strlen(cstring);
        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
            PyErr_SetString(PyExc_OverflowError,
                            "c-string too long to convert to Python");
            return NULL;
        }
        length = (Py_ssize_t) slen;
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;
        }
        if (stop < 0)
            stop += length;
    }
    /* Empty or inverted range decodes to the shared empty string. */
    if (unlikely(stop <= start))
        return __Pyx_NewRef(__pyx_empty_unicode);
    length = stop - start;
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Match exc_type against a tuple of exception classes. The first loop
 * (Py3) catches the common identity case cheaply before the subclass walk. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* PyErr_ExceptionMatches equivalent that reads the current exception
 * directly from the given thread state. */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
    if (unlikely(PyTuple_Check(err)))
        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
/* Slow path of getattr(o, n, d): swallow an AttributeError and return a new
 * reference to the default; any other exception propagates as NULL. */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        return NULL;
    __Pyx_PyErr_Clear();
    Py_INCREF(d);
    return d;
}
/* Three-argument getattr(o, n, d). Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = __Pyx_GetAttr(o, n);
    return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* ExtTypeTest */
/* Verify that 'obj' is an instance of 'type' (extension-type cast check).
 * Returns 1 on success; otherwise sets TypeError (or SystemError for a
 * missing type object) and returns 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (likely(type)) {
        if (likely(__Pyx_TypeCheck(obj, type)))
            return 1;
        PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                     Py_TYPE(obj)->tp_name, type->tp_name);
        return 0;
    }
    PyErr_SetString(PyExc_SystemError, "Missing type object");
    return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the thread state's exc_info stack to the innermost frame that holds
 * a real (non-None) exception -- the one sys.exc_info() would report. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Save the currently *handled* exception (sys.exc_info()) into the three
 * output slots as new references, reading the thread state directly. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    #endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Restore a previously saved handled exception, stealing the three
 * references and dropping whatever was installed in the meantime. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    #endif
    /* Decref the old values only after the new ones are installed. */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
/* Fetch and normalize the pending exception into *type/*value/*tb (new
 * references) and install it as the currently handled exception (as an
 * 'except' block entry does). Returns 0 on success, -1 on failure with all
 * outputs zeroed. */
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    /* Manually fetch-and-clear, equivalent to PyErr_Fetch. */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    /* Normalization itself may have raised. */
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
    #if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
    #endif
    /* One set of references goes to the caller, the other (below) into the
       thread state's handled-exception slots. */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    #if CYTHON_USE_EXC_INFO_STACK
    {
        _PyErr_StackItem *exc_info = tstate->exc_info;
        tmp_type = exc_info->exc_type;
        tmp_value = exc_info->exc_value;
        tmp_tb = exc_info->exc_traceback;
        exc_info->exc_type = local_type;
        exc_info->exc_value = local_value;
        exc_info->exc_traceback = local_tb;
    }
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    #endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
/* Swap the currently handled exception with the triple in *type/*value/*tb
 * (references are exchanged, not copied), reading the thread state directly. */
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = *type;
    exc_info->exc_value = *value;
    exc_info->exc_traceback = *tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
    #endif
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable variant using the public exc-info API. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif
/* Import */
/* Implement the import statement: returns a new reference to the imported
 * module, or NULL with an exception set. 'level' follows __import__ semantics
 * (-1 = try relative then absolute on Py2-style requests, 0 = absolute). */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
/* Py2: route through builtins.__import__ so import hooks are honored. */
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
/* __import__ requires a real (possibly empty) fromlist object. */
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
/* global_dict is borrowed from the module; empty_dict is owned. */
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
/* Emulate Py2's implicit-relative lookup: try a level-1 (relative)
 * import first when this module lives inside a package, swallowing
 * only ImportError before falling back to an absolute import. */
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
/* Shared cleanup: on failure 'module' is still NULL and is returned as-is. */
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Walk the tp_base chain of 'a' looking for 'b' among its strict bases.
 * Falls back to "everything inherits from object" when the chain ends. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
/* issubclass(a, b) for type objects: prefer the (usually present) tp_mro
 * tuple for an O(len(mro)) scan; fall back to the base-chain walk. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
/* Py2: PyObject_IsSubclass can run arbitrary code and raise, so the live
 * exception state is saved/restored around it and errors are downgraded
 * to "no match" (reported via PyErr_WriteUnraisable). */
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
/* Py3: exception classes are plain types, so a direct subtype check is safe
 * and cannot raise. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
/* Match exc_type against each entry of an exception tuple, trying cheap
 * identity comparisons before subclass checks. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
/* Fast identity-only pass first; subclass checks are deferred below. */
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
/* Non-class tuple entries are silently skipped here; the slow-path
 * C-API call below never runs for tuples, matching CPython leniency. */
}
}
return 0;
}
/* Fast-path replacement for PyErr_GivenExceptionMatches. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Match 'err' against either of two exception classes in one call. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
/* Optimized 'op1 + constant' where op2 is a known compile-time integer whose
 * C value is 'intval'. Falls back to the generic number protocol for any
 * operand type or magnitude the fast paths cannot handle exactly. */
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
/* Add in unsigned arithmetic (defined wraparound), then detect signed
 * overflow: the result is valid iff it agrees in sign with either input. */
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
/* Unpack the PyLong's base-2^PyLong_SHIFT digits directly; Py_SIZE
 * encodes both the digit count and the sign (negative = negative value). */
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
/* For 2..4 digits, reassemble the value into a long when it fits,
 * else into a long long (jumping to long_long), else fall back to
 * the generic nb_add slot. Each case only compiles its reassembly
 * when the target C type is provably wide enough. */
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
/* Values this small cannot overflow: |a| < 2^(4*PyLong_SHIFT), |b| fits
 * in a long, and the target type was verified wide enough above. */
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
/* Generic fallback for every other type (and for += on such types). */
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None */
/* Raise UnboundLocalError with CPython's standard wording for a read of a
 * local variable before its first assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError,
                 "local variable '%s' referenced before assignment", varname);
}
/* None */
/* Python-style floor division for C longs: C's '/' truncates toward zero,
 * so when a nonzero remainder has the opposite sign to the divisor the
 * quotient must be nudged down by one. Division by zero remains UB, as in
 * the original (callers guard against it). */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long quotient = a / b;
    long remainder = a - quotient * b;
    if (remainder != 0 && ((remainder ^ b) < 0))
        quotient--;
    return quotient;
}
/* ImportFrom */
/* Implement 'from module import name': fetch the attribute, converting a
 * plain AttributeError into the ImportError Python code expects. Returns a
 * new reference or NULL with an exception set. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
/* hasattr(o, n) with Py2-style semantics: any failure during the attribute
 * lookup is swallowed and reported as "absent" (0). Returns 1 when present,
 * 0 when absent, -1 (with TypeError set) when n is not a string. */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *attr;
    if (!__Pyx_PyBaseString_Check(n)) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    attr = __Pyx_GetAttr(o, n);
    if (attr == NULL) {
        PyErr_Clear();
        return 0;
    }
    Py_DECREF(attr);
    return 1;
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Raise the standard AttributeError for a failed generic lookup. Always
 * returns NULL so callers can 'return' the result directly. */
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
/* Fast getattr for extension types known to have no instance __dict__
 * (pre-3.7, where CPython lacked this shortcut): look the name up on the
 * type only, honoring the descriptor protocol. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
/* Non-str names take the fully general (and slower) path. */
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
/* _PyType_Lookup returns a borrowed reference. */
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
/* Invoke __get__ for (non-data and data) descriptors; plain class
 * attributes fall through and are returned directly. */
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Dispatch getattr: types that carry an instance __dict__ use the full
 * generic machinery; dict-less types take the type-only fast path. */
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    if (Py_TYPE(obj)->tp_dictoffset != 0)
        return PyObject_GenericGetAttr(obj, attr_name);
    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
/* Store the C-level vtable pointer into a type's dict under '__pyx_vtable__',
 * wrapped in a capsule. Returns 0 on success, -1 with an exception set. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
/* Pre-2.7 lacks capsules; use the legacy CObject wrapper. */
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
/* Clear the pending exception only if it is an AttributeError; anything
 * else is left set for the caller to see. */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
/* getattr that treats a missing attribute as a plain NULL result without
 * raising. Non-AttributeError failures still propagate. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
/* 3.7+: the private generic-getattr entry point can suppress the
 * AttributeError for us (final argument = 1). */
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
/* Return 1 iff meth.__name__ equals 'name'. Lookup or comparison failures
 * are swallowed (exception cleared) and reported as 0. */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    PyObject *meth_name = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
    int result = (meth_name != NULL)
        ? PyObject_RichCompareBool(meth_name, name, Py_EQ)
        : -1;
    if (result < 0) {
        PyErr_Clear();
        result = 0;
    }
    Py_XDECREF(meth_name);
    return result;
}
/* Wire pickle support for an extension type: when the type still uses
 * object's default __reduce__/__reduce_ex__, promote the generated
 * __reduce_cython__/__setstate_cython__ helpers into the real slots.
 * Types defining __getstate__ are left untouched. Returns 0 or -1. */
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
/* NOTE: _PyType_Lookup returns borrowed references, so the lookup-based
 * branches must NOT be decref'd — hence the #if around the cleanup below. */
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
/* Only intervene when the type did not override __reduce_ex__ itself. */
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
/* Move __reduce_cython__ into the __reduce__ slot and drop the alias. */
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
/* Same promotion for __setstate__ / __setstate_cython__. */
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
/* Invalidate method caches after mutating tp_dict directly. */
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* FetchCommonType */
/* Share one instance of a utility type (e.g. the cyfunction type) across all
 * Cython modules of the same ABI: cache it as an attribute of a per-ABI fake
 * module, creating and registering it on first use. Returns a borrowed-style
 * pointer that the cache keeps alive, or NULL with an exception set. */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
PyObject* fake_module;
PyTypeObject* cached_type = NULL;
fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
if (!fake_module) return NULL;
/* PyImport_AddModule returns a borrowed reference; take our own. */
Py_INCREF(fake_module);
cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
if (cached_type) {
/* Sanity-check the cached object really is a compatible type object. */
if (!PyType_Check((PyObject*)cached_type)) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s is not a type object",
type->tp_name);
goto bad;
}
if (cached_type->tp_basicsize != type->tp_basicsize) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s has the wrong size, try recompiling",
type->tp_name);
goto bad;
}
} else {
/* Only a missing attribute is acceptable here; register our copy. */
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
PyErr_Clear();
if (PyType_Ready(type) < 0) goto bad;
if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
goto bad;
Py_INCREF(type);
cached_type = type;
}
done:
Py_DECREF(fake_module);
return cached_type;
bad:
Py_XDECREF(cached_type);
cached_type = NULL;
goto done;
}
/* CythonFunctionShared */
#include <structmember.h>
/* __doc__ getter: lazily materialize a string object from the C-level
 * ml_doc text on first access; functions without a doc string yield None. */
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
if (unlikely(op->func_doc == NULL)) {
if (op->func.m_ml->ml_doc) {
#if PY_MAJOR_VERSION >= 3
op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
#else
op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
#endif
if (unlikely(op->func_doc == NULL))
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
Py_INCREF(op->func_doc);
return op->func_doc;
}
/* __doc__ setter: deletion (value == NULL) stores None instead of leaving
 * the slot empty; any other object is accepted as-is. */
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
    PyObject *previous;
    if (!value)
        value = Py_None;
    Py_INCREF(value);
    previous = op->func_doc;
    op->func_doc = value;
    Py_XDECREF(previous);
    return 0;
}
/* __name__ getter: lazily intern the C-level ml_name on first access. */
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_name == NULL)) {
#if PY_MAJOR_VERSION >= 3
op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
#else
op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
#endif
if (unlikely(op->func_name == NULL))
return NULL;
}
Py_INCREF(op->func_name);
return op->func_name;
}
/* __name__ setter: requires a str object (unicode on Py3, bytes-str on Py2);
 * deletion and non-strings are rejected with TypeError. */
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__name__ must be set to a string object");
return -1;
}
tmp = op->func_name;
Py_INCREF(value);
op->func_name = value;
Py_XDECREF(tmp);
return 0;
}
/* __qualname__ getter: the slot is always populated at construction time. */
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
    PyObject *qualname = op->func_qualname;
    Py_INCREF(qualname);
    return qualname;
}
/* __qualname__ setter: same string-only contract as __name__. */
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__qualname__ must be set to a string object");
return -1;
}
tmp = op->func_qualname;
Py_INCREF(value);
op->func_qualname = value;
Py_XDECREF(tmp);
return 0;
}
/* __self__ getter: expose the closure object, or None when there is none. */
static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
{
    PyObject *result = m->func_closure ? m->func_closure : Py_None;
    Py_INCREF(result);
    return result;
}
/* __dict__ getter: create the per-function dict lazily on first access. */
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
    PyObject *dict = op->func_dict;
    if (dict == NULL) {
        dict = PyDict_New();
        if (dict == NULL)
            return NULL;
        op->func_dict = dict;
    }
    Py_INCREF(dict);
    return dict;
}
/* __dict__ setter: must always be a real dict and may never be deleted. */
static int
__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
    PyObject *old_dict;
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "function's dictionary may not be deleted");
        return -1;
    }
    if (!PyDict_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "setting function's dictionary to a non-dict");
        return -1;
    }
    Py_INCREF(value);
    old_dict = op->func_dict;
    op->func_dict = value;
    Py_XDECREF(old_dict);
    return 0;
}
/* __globals__ getter: the slot is always populated at construction time. */
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
    PyObject *globals = op->func_globals;
    Py_INCREF(globals);
    return globals;
}
/* __closure__ getter: cyfunctions never expose a cell closure — always None. */
static PyObject *
__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
    Py_RETURN_NONE;
}
/* __code__ getter: return the stored code object, or None when absent. */
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
    PyObject *code = op->func_code;
    if (code == NULL)
        code = Py_None;
    Py_INCREF(code);
    return code;
}
/* Populate defaults_tuple/defaults_kwdict from the generated defaults getter,
 * which returns a (tuple, kwdict) pair. Returns 0 on success, -1 on error. */
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
int result = 0;
PyObject *res = op->defaults_getter((PyObject *) op);
if (unlikely(!res))
return -1;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
/* Fast path: borrow the items straight out of the tuple, then own them. */
op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
Py_INCREF(op->defaults_tuple);
op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
Py_INCREF(op->defaults_kwdict);
#else
/* Safe path: PySequence_ITEM returns new references and may fail. */
op->defaults_tuple = PySequence_ITEM(res, 0);
if (unlikely(!op->defaults_tuple)) result = -1;
else {
op->defaults_kwdict = PySequence_ITEM(res, 1);
if (unlikely(!op->defaults_kwdict)) result = -1;
}
#endif
Py_DECREF(res);
return result;
}
/* __defaults__ setter: accepts None or a tuple; deletion resets to None. */
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
    PyObject *old;
    if (value == NULL) {
        value = Py_None;
    } else if (!(value == Py_None || PyTuple_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__defaults__ must be set to a tuple object");
        return -1;
    }
    Py_INCREF(value);
    old = op->defaults_tuple;
    op->defaults_tuple = value;
    Py_XDECREF(old);
    return 0;
}
/* __defaults__ getter: materialize lazily via the generated getter when one
 * exists; otherwise report None. */
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
    PyObject *defaults = op->defaults_tuple;
    if (defaults == NULL) {
        if (op->defaults_getter == NULL) {
            defaults = Py_None;
        } else {
            if (__Pyx_CyFunction_init_defaults(op) < 0)
                return NULL;
            defaults = op->defaults_tuple;
        }
    }
    Py_INCREF(defaults);
    return defaults;
}
/* __kwdefaults__ setter: accepts None or a dict; deletion resets to None. */
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
    PyObject *old;
    if (value == NULL) {
        value = Py_None;
    } else if (!(value == Py_None || PyDict_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
                        "__kwdefaults__ must be set to a dict object");
        return -1;
    }
    Py_INCREF(value);
    old = op->defaults_kwdict;
    op->defaults_kwdict = value;
    Py_XDECREF(old);
    return 0;
}
/* __kwdefaults__ getter: materialize lazily via the generated getter when one
 * exists; otherwise report None. */
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
    PyObject *kwdefaults = op->defaults_kwdict;
    if (kwdefaults == NULL) {
        if (op->defaults_getter == NULL) {
            kwdefaults = Py_None;
        } else {
            if (__Pyx_CyFunction_init_defaults(op) < 0)
                return NULL;
            kwdefaults = op->defaults_kwdict;
        }
    }
    Py_INCREF(kwdefaults);
    return kwdefaults;
}
/* __annotations__ setter: None (or deletion) clears the slot so the getter
 * recreates an empty dict; anything else must be a dict. */
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
    PyObject *old;
    if (value == NULL || value == Py_None) {
        value = NULL;
    } else if (!PyDict_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__annotations__ must be set to a dict object");
        return -1;
    }
    Py_XINCREF(value);
    old = op->func_annotations;
    op->func_annotations = value;
    Py_XDECREF(old);
    return 0;
}
/* __annotations__ getter: lazily create an empty dict on first access. */
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
    PyObject *annotations = op->func_annotations;
    if (annotations == NULL) {
        annotations = PyDict_New();
        if (annotations == NULL)
            return NULL;
        op->func_annotations = annotations;
    }
    Py_INCREF(annotations);
    return annotations;
}
/* Attribute tables for the cyfunction type. Py2-style func_* aliases are
 * exposed alongside the dunder names; both map to the same getters/setters. */
static PyGetSetDef __pyx_CyFunction_getsets[] = {
{(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
{(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
{(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
{(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
{0, 0, 0, 0, 0}
};
/* __module__ is stored on the embedded PyCFunctionObject; writes are
 * restricted as for builtin functions. */
static PyMemberDef __pyx_CyFunction_members[] = {
{(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
/* __reduce__: pickle a cyfunction by name only (looked up at unpickle time),
 * mirroring how builtin functions pickle. */
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
{
#if PY_MAJOR_VERSION >= 3
return PyUnicode_FromString(m->func.m_ml->ml_name);
#else
return PyString_FromString(m->func.m_ml->ml_name);
#endif
}
static PyMethodDef __pyx_CyFunction_methods[] = {
{"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
{0, 0, 0, 0}
};
/* The weakref list moved into PyCFunctionObject (m_weakreflist) in 3.5;
 * older versions keep it in our own struct. */
#if PY_VERSION_HEX < 0x030500A0
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
#endif
/* Initialize a freshly allocated cyfunction object. Steals no references:
 * closure/module/code are X-incref'd, qualname and globals are incref'd.
 * Returns op as PyObject*, or NULL if op was NULL. */
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname,
PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
if (unlikely(op == NULL))
return NULL;
op->flags = flags;
__Pyx_CyFunction_weakreflist(op) = NULL;
op->func.m_ml = ml;
/* m_self points back at the function itself so METH_* callees receive it. */
op->func.m_self = (PyObject *) op;
Py_XINCREF(closure);
op->func_closure = closure;
Py_XINCREF(module);
op->func.m_module = module;
op->func_dict = NULL;
op->func_name = NULL;
Py_INCREF(qualname);
op->func_qualname = qualname;
op->func_doc = NULL;
op->func_classobj = NULL;
op->func_globals = globals;
Py_INCREF(op->func_globals);
Py_XINCREF(code);
op->func_code = code;
/* Defaults storage is attached later via __Pyx_CyFunction_InitDefaults. */
op->defaults_pyobjects = 0;
op->defaults_size = 0;
op->defaults = NULL;
op->defaults_tuple = NULL;
op->defaults_kwdict = NULL;
op->defaults_getter = NULL;
op->func_annotations = NULL;
return (PyObject *) op;
}
/* GC tp_clear: drop every owned reference, including the PyObjects packed
 * into the opaque defaults blob, then free the blob itself. */
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
Py_CLEAR(m->func_closure);
Py_CLEAR(m->func.m_module);
Py_CLEAR(m->func_dict);
Py_CLEAR(m->func_name);
Py_CLEAR(m->func_qualname);
Py_CLEAR(m->func_doc);
Py_CLEAR(m->func_globals);
Py_CLEAR(m->func_code);
Py_CLEAR(m->func_classobj);
Py_CLEAR(m->defaults_tuple);
Py_CLEAR(m->defaults_kwdict);
Py_CLEAR(m->func_annotations);
if (m->defaults) {
/* The first defaults_pyobjects slots of the blob hold PyObject*. */
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_XDECREF(pydefaults[i]);
PyObject_Free(m->defaults);
m->defaults = NULL;
}
return 0;
}
/* Inner dealloc: clear weakrefs, drop all owned references, free storage. */
static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
if (__Pyx_CyFunction_weakreflist(m) != NULL)
PyObject_ClearWeakRefs((PyObject *) m);
__Pyx_CyFunction_clear(m);
PyObject_GC_Del(m);
}
/* tp_dealloc: untrack from the GC before tearing the object down. */
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
PyObject_GC_UnTrack(m);
__Pyx__CyFunction_dealloc(m);
}
/* GC tp_traverse: visit every owned reference, mirroring tp_clear above. */
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
Py_VISIT(m->func_closure);
Py_VISIT(m->func.m_module);
Py_VISIT(m->func_dict);
Py_VISIT(m->func_name);
Py_VISIT(m->func_qualname);
Py_VISIT(m->func_doc);
Py_VISIT(m->func_globals);
Py_VISIT(m->func_code);
Py_VISIT(m->func_classobj);
Py_VISIT(m->defaults_tuple);
Py_VISIT(m->defaults_kwdict);
if (m->defaults) {
/* Also visit the PyObject* slots stored inside the defaults blob. */
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_VISIT(pydefaults[i]);
}
return 0;
}
/* tp_descr_get: bind the function when accessed through a class/instance.
 * On Py3 the default bound-method behavior suffices; on Py2 static/class
 * method flags and unbound access (obj == None) need special handling. */
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
{
#if PY_MAJOR_VERSION < 3
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
/* Static methods bind to nothing: return the function unchanged. */
Py_INCREF(func);
return func;
}
if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
/* Class methods bind to the class (derived from obj if not given). */
if (type == NULL)
type = (PyObject *)(Py_TYPE(obj));
return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
}
if (obj == Py_None)
obj = NULL;
#endif
return __Pyx_PyMethod_New(func, obj, type);
}
/* tp_repr: "<cyfunction qualname at 0xADDR>". */
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
#if PY_MAJOR_VERSION >= 3
return PyUnicode_FromFormat("<cyfunction %U at %p>",
op->func_qualname, (void *)op);
#else
return PyString_FromFormat("<cyfunction %s at %p>",
PyString_AsString(op->func_qualname), (void *)op);
#endif
}
/* Dispatch a call through the embedded PyMethodDef according to its METH_*
 * calling convention, enforcing the matching arity/keyword rules with the
 * same error messages CPython uses for builtin functions. */
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
PyCFunctionObject* f = (PyCFunctionObject*)func;
PyCFunction meth = f->m_ml->ml_meth;
Py_ssize_t size;
switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
case METH_VARARGS:
/* Keywords are only rejected when actually non-empty (falls through
 * to the shared "takes no keyword arguments" error below). */
if (likely(kw == NULL || PyDict_Size(kw) == 0))
return (*meth)(self, arg);
break;
case METH_VARARGS | METH_KEYWORDS:
return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
case METH_NOARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 0))
return (*meth)(self, NULL);
PyErr_Format(PyExc_TypeError,
"%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
case METH_O:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 1)) {
PyObject *result, *arg0;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
arg0 = PyTuple_GET_ITEM(arg, 0);
#else
arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL;
#endif
result = (*meth)(self, arg0);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(arg0);
#endif
return result;
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
default:
PyErr_SetString(PyExc_SystemError, "Bad call flags in "
"__Pyx_CyFunction_Call. METH_OLDARGS is no "
"longer supported!");
return NULL;
}
/* Reached only when non-empty keywords were passed to a convention that
 * does not accept them. */
PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
f->m_ml->ml_name);
return NULL;
}
/* tp_call helper: call with the function's own m_self as 'self'. */
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
}
/* tp_call: for non-static methods of cdef classes, peel the first positional
 * argument off as 'self' and pass the remainder on; otherwise call normally. */
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
PyObject *result;
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
Py_ssize_t argc;
PyObject *new_args;
PyObject *self;
argc = PyTuple_GET_SIZE(args);
new_args = PyTuple_GetSlice(args, 1, argc);
if (unlikely(!new_args))
return NULL;
/* PyTuple_GetItem returns a borrowed reference; fails on empty args. */
self = PyTuple_GetItem(args, 0);
if (unlikely(!self)) {
Py_DECREF(new_args);
return NULL;
}
result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
Py_DECREF(new_args);
} else {
result = __Pyx_CyFunction_Call(func, args, kw);
}
return result;
}
/* Type object for Cython-compiled functions; initializer is positional,
 * so the slot names are annotated below. */
static PyTypeObject __pyx_CyFunctionType_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "cython_function_or_method", /* tp_name */
    sizeof(__pyx_CyFunctionObject), /* tp_basicsize */
    0, /* tp_itemsize */
    (destructor) __Pyx_CyFunction_dealloc, /* tp_dealloc */
    0, /* tp_print (older CPython) / tp_vectorcall_offset (3.8+) -- version-dependent slot */
    0, /* tp_getattr */
    0, /* tp_setattr */
#if PY_MAJOR_VERSION < 3
    0, /* tp_compare */
#else
    0, /* tp_as_async */
#endif
    (reprfunc) __Pyx_CyFunction_repr, /* tp_repr */
    0, /* tp_as_number */
    0, /* tp_as_sequence */
    0, /* tp_as_mapping */
    0, /* tp_hash */
    __Pyx_CyFunction_CallAsMethod, /* tp_call */
    0, /* tp_str */
    0, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0, /* tp_doc */
    (traverseproc) __Pyx_CyFunction_traverse, /* tp_traverse */
    (inquiry) __Pyx_CyFunction_clear, /* tp_clear */
    0, /* tp_richcompare */
#if PY_VERSION_HEX < 0x030500A0
    offsetof(__pyx_CyFunctionObject, func_weakreflist), /* tp_weaklistoffset */
#else
    offsetof(PyCFunctionObject, m_weakreflist), /* tp_weaklistoffset */
#endif
    0, /* tp_iter */
    0, /* tp_iternext */
    __pyx_CyFunction_methods, /* tp_methods */
    __pyx_CyFunction_members, /* tp_members */
    __pyx_CyFunction_getsets, /* tp_getset */
    0, /* tp_base */
    0, /* tp_dict */
    __Pyx_CyFunction_descr_get, /* tp_descr_get */
    0, /* tp_descr_set */
    offsetof(__pyx_CyFunctionObject, func_dict), /* tp_dictoffset */
    0, /* tp_init */
    0, /* tp_alloc */
    0, /* tp_new */
    0, /* tp_free */
    0, /* tp_is_gc */
    0, /* tp_bases */
    0, /* tp_mro */
    0, /* tp_cache */
    0, /* tp_subclasses */
    0, /* tp_weaklist */
    0, /* tp_del */
    0, /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
    0, /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1
    0, /* tp_vectorcall */
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
    0, /* tp_print (re-added for 3.8 only) */
#endif
};
static int __pyx_CyFunction_init(void) {
    /* Register/fetch the shared CyFunction type; -1 on failure. */
    __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
    return unlikely(__pyx_CyFunctionType == NULL) ? -1 : 0;
}
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
    /* Allocate and zero the C-level defaults blob for a CyFunction;
     * 'pyobjects' counts the PyObject* slots embedded in it.
     * Returns the blob, or NULL with MemoryError set. */
    __pyx_CyFunctionObject *cf = (__pyx_CyFunctionObject *) func;
    void *blob = PyObject_Malloc(size);
    if (unlikely(!blob))
        return PyErr_NoMemory();
    memset(blob, 0, size);
    cf->defaults = blob;
    cf->defaults_pyobjects = pyobjects;
    cf->defaults_size = size;
    return blob;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
    /* Store the positional-defaults tuple, taking a new reference. */
    Py_INCREF(tuple);
    ((__pyx_CyFunctionObject *) func)->defaults_tuple = tuple;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
    /* Store the keyword-only defaults dict, taking a new reference. */
    Py_INCREF(dict);
    ((__pyx_CyFunctionObject *) func)->defaults_kwdict = dict;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
    /* Store the __annotations__ dict, taking a new reference. */
    Py_INCREF(dict);
    ((__pyx_CyFunctionObject *) func)->func_annotations = dict;
}
/* FusedFunction */
static PyObject *
__pyx_FusedFunction_New(PyMethodDef *ml, int flags,
                        PyObject *qualname, PyObject *closure,
                        PyObject *module, PyObject *globals,
                        PyObject *code)
{
    /* Allocate a fused-function object and run the shared CyFunction
     * initializer; the fused-specific fields start out empty. */
    PyObject *op = __Pyx_CyFunction_Init(
        PyObject_GC_New(__pyx_CyFunctionObject, __pyx_FusedFunctionType),
        ml, flags, qualname, closure, module, globals, code
    );
    if (likely(op)) {
        __pyx_FusedFunctionObject *ff = (__pyx_FusedFunctionObject *) op;
        ff->__signatures__ = NULL;
        ff->type = NULL;
        ff->self = NULL;
        PyObject_GC_Track(op);  /* only track once fully initialized */
    }
    return op;
}
static void
__pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self)
{
    /* Untrack from GC before mutating, drop the fused-specific references,
     * then delegate to the base CyFunction teardown (which frees self). */
    PyObject_GC_UnTrack(self);
    Py_CLEAR(self->self);
    Py_CLEAR(self->type);
    Py_CLEAR(self->__signatures__);
    __Pyx__CyFunction_dealloc((__pyx_CyFunctionObject *) self);
}
static int
__pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self,
                             visitproc visit,
                             void *arg)
{
    /* GC traversal: visit the fused-specific refs, then the base's. */
    Py_VISIT(self->self);
    Py_VISIT(self->type);
    Py_VISIT(self->__signatures__);
    return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg);
}
static int
__pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self)
{
    /* GC clear: drop the fused-specific refs, then the base's. */
    Py_CLEAR(self->self);
    Py_CLEAR(self->type);
    Py_CLEAR(self->__signatures__);
    return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self);
}
static PyObject *
__pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject *type)
{
    /* Descriptor protocol: binding a fused function to an instance/class
     * creates a fresh fused-function object that carries obj/type, with
     * the defaults blob, classobj and __signatures__ copied across. */
    __pyx_FusedFunctionObject *func, *meth;
    func = (__pyx_FusedFunctionObject *) self;
    if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) {
        /* Already bound, or a staticmethod: binding is a no-op. */
        Py_INCREF(self);
        return self;
    }
    if (obj == Py_None)
        obj = NULL;
    meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_New(
        ((PyCFunctionObject *) func)->m_ml,
        ((__pyx_CyFunctionObject *) func)->flags,
        ((__pyx_CyFunctionObject *) func)->func_qualname,
        ((__pyx_CyFunctionObject *) func)->func_closure,
        ((PyCFunctionObject *) func)->m_module,
        ((__pyx_CyFunctionObject *) func)->func_globals,
        ((__pyx_CyFunctionObject *) func)->func_code);
    if (!meth)
        return NULL;
    if (func->func.defaults) {
        /* Deep-copy the C defaults blob, then add a reference to each
         * PyObject slot embedded in it. */
        PyObject **pydefaults;
        int i;
        if (!__Pyx_CyFunction_InitDefaults((PyObject*)meth,
                                           func->func.defaults_size,
                                           func->func.defaults_pyobjects)) {
            Py_XDECREF((PyObject*)meth);
            return NULL;
        }
        memcpy(meth->func.defaults, func->func.defaults, func->func.defaults_size);
        pydefaults = __Pyx_CyFunction_Defaults(PyObject *, meth);
        for (i = 0; i < meth->func.defaults_pyobjects; i++)
            Py_XINCREF(pydefaults[i]);
    }
    Py_XINCREF(func->func.func_classobj);
    meth->func.func_classobj = func->func.func_classobj;
    Py_XINCREF(func->__signatures__);
    meth->__signatures__ = func->__signatures__;
    Py_XINCREF(type);
    meth->type = type;
    Py_XINCREF(func->func.defaults_tuple);
    meth->func.defaults_tuple = func->func.defaults_tuple;
    if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD)
        obj = type;  /* classmethods bind to the class, not the instance */
    Py_XINCREF(obj);
    meth->self = obj;
    return (PyObject *) meth;
}
static PyObject *
_obj_to_str(PyObject *obj)
{
    /* Types are rendered via their __name__ attribute; anything else
     * through str(). Returns a new reference or NULL on error. */
    return PyType_Check(obj)
        ? PyObject_GetAttr(obj, __pyx_n_s_name_2)
        : PyObject_Str(obj);
}
static PyObject *
__pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx)
{
    /* func[types]: look up a specialization by signature string. A tuple
     * index is rendered as "type0|type1|...". If this function is bound,
     * the found specialization is re-bound to the same self/type. */
    PyObject *signature = NULL;
    PyObject *unbound_result_func;
    PyObject *result_func = NULL;
    if (self->__signatures__ == NULL) {
        PyErr_SetString(PyExc_TypeError, "Function is not fused");
        return NULL;
    }
    if (PyTuple_Check(idx)) {
        /* Join the per-type strings with "|". */
        PyObject *list = PyList_New(0);
        Py_ssize_t n = PyTuple_GET_SIZE(idx);
        PyObject *sep = NULL;
        int i;
        if (unlikely(!list))
            return NULL;
        for (i = 0; i < n; i++) {
            int ret;
            PyObject *string;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
            PyObject *item = PyTuple_GET_ITEM(idx, i);
#else
            PyObject *item = PySequence_ITEM(idx, i); if (unlikely(!item)) goto __pyx_err;
#endif
            string = _obj_to_str(item);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
            Py_DECREF(item);
#endif
            if (unlikely(!string)) goto __pyx_err;
            ret = PyList_Append(list, string);
            Py_DECREF(string);
            if (unlikely(ret < 0)) goto __pyx_err;
        }
        sep = PyUnicode_FromString("|");
        if (likely(sep))
            signature = PyUnicode_Join(sep, list);
__pyx_err:
        ;
        Py_DECREF(list);
        Py_XDECREF(sep);
    } else {
        signature = _obj_to_str(idx);
    }
    if (!signature)
        return NULL;
    unbound_result_func = PyObject_GetItem(self->__signatures__, signature);
    if (unbound_result_func) {
        if (self->self || self->type) {
            /* Propagate func_classobj, then bind via the descriptor. */
            __pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func;
            Py_CLEAR(unbound->func.func_classobj);
            Py_XINCREF(self->func.func_classobj);
            unbound->func.func_classobj = self->func.func_classobj;
            result_func = __pyx_FusedFunction_descr_get(unbound_result_func,
                                                        self->self, self->type);
        } else {
            result_func = unbound_result_func;
            Py_INCREF(result_func);
        }
    }
    Py_DECREF(signature);
    Py_XDECREF(unbound_result_func);
    return result_func;
}
static PyObject *
__pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw)
{
    /* A staticmethod that is already specialized (no __signatures__ left)
     * is called directly; any other cclass function takes the method path. */
    __pyx_CyFunctionObject *cf = (__pyx_CyFunctionObject *) func;
    __pyx_FusedFunctionObject *ff = (__pyx_FusedFunctionObject *) func;
    int static_specialized =
        (cf->flags & __Pyx_CYFUNCTION_STATICMETHOD) && !ff->__signatures__;
    if ((cf->flags & __Pyx_CYFUNCTION_CCLASS) && !static_specialized)
        return __Pyx_CyFunction_CallAsMethod(func, args, kw);
    return __Pyx_CyFunction_Call(func, args, kw);
}
static PyObject *
__pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw)
{
    /* tp_call: optionally prepend the bound self, type-check the receiver,
     * then either dispatch through __signatures__ to select a concrete
     * specialization or invoke the already-specialized function. */
    __pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func;
    Py_ssize_t argc = PyTuple_GET_SIZE(args);
    PyObject *new_args = NULL;
    __pyx_FusedFunctionObject *new_func = NULL;
    PyObject *result = NULL;
    PyObject *self = NULL;
    int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD;
    int is_classmethod = binding_func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD;
    if (binding_func->self) {
        /* Bound method: build (self,) + args. */
        Py_ssize_t i;
        new_args = PyTuple_New(argc + 1);
        if (!new_args)
            return NULL;
        self = binding_func->self;
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
        Py_INCREF(self);  /* ref owned by local 'self', released at 'bad' */
#endif
        Py_INCREF(self);  /* this ref is consumed by PyTuple_SET_ITEM */
        PyTuple_SET_ITEM(new_args, 0, self);
        for (i = 0; i < argc; i++) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
            PyObject *item = PyTuple_GET_ITEM(args, i);
            Py_INCREF(item);
#else
            PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad;
#endif
            PyTuple_SET_ITEM(new_args, i + 1, item);
        }
        args = new_args;
    } else if (binding_func->type) {
        /* Unbound method looked up on the class: self is args[0]. */
        if (argc < 1) {
            PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given.");
            return NULL;
        }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        self = PyTuple_GET_ITEM(args, 0);
#else
        self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL;
#endif
    }
    if (self && !is_classmethod && !is_staticmethod) {
        /* PyObject_IsInstance returning -1 (error) is truthy, so it skips
         * the first branch and is caught by the == -1 test below. */
        int is_instance = PyObject_IsInstance(self, binding_func->type);
        if (unlikely(!is_instance)) {
            PyErr_Format(PyExc_TypeError,
                         "First argument should be of type %.200s, got %.200s.",
                         ((PyTypeObject *) binding_func->type)->tp_name,
                         self->ob_type->tp_name);
            goto bad;
        } else if (unlikely(is_instance == -1)) {
            goto bad;
        }
    }
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
    Py_XDECREF(self);
    self = NULL;
#endif
    if (binding_func->__signatures__) {
        /* Still fused: resolve the concrete specialization first. */
        PyObject *tup;
        if (is_staticmethod && binding_func->func.flags & __Pyx_CYFUNCTION_CCLASS) {
            tup = PyTuple_Pack(3, args,
                               kw == NULL ? Py_None : kw,
                               binding_func->func.defaults_tuple);
            if (unlikely(!tup)) goto bad;
            new_func = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_CallMethod(
                func, binding_func->__signatures__, tup, NULL);
        } else {
            tup = PyTuple_Pack(4, binding_func->__signatures__, args,
                               kw == NULL ? Py_None : kw,
                               binding_func->func.defaults_tuple);
            if (unlikely(!tup)) goto bad;
            new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL);
        }
        Py_DECREF(tup);
        if (unlikely(!new_func))
            goto bad;
        Py_XINCREF(binding_func->func.func_classobj);
        Py_CLEAR(new_func->func.func_classobj);
        new_func->func.func_classobj = binding_func->func.func_classobj;
        func = (PyObject *) new_func;
    }
    result = __pyx_FusedFunction_callfunction(func, args, kw);
bad:
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
    Py_XDECREF(self);
#endif
    Py_XDECREF(new_args);
    Py_XDECREF((PyObject *) new_func);
    return result;
}
/* Expose the signature->specialization mapping as a read-only attribute. */
static PyMemberDef __pyx_FusedFunction_members[] = {
    {(char *) "__signatures__",
     T_OBJECT,
     offsetof(__pyx_FusedFunctionObject, __signatures__),
     READONLY,
     0},
    {0, 0, 0, 0, 0},  /* sentinel */
};
static PyMappingMethods __pyx_FusedFunction_mapping_methods = {
    0, /* mp_length */
    (binaryfunc) __pyx_FusedFunction_getitem, /* mp_subscript: func[types] specialization lookup */
    0, /* mp_ass_subscript */
};
/* Type object for fused (multi-signature) Cython functions; initializer
 * is positional, so the slot names are annotated below. */
static PyTypeObject __pyx_FusedFunctionType_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "fused_cython_function", /* tp_name */
    sizeof(__pyx_FusedFunctionObject), /* tp_basicsize */
    0, /* tp_itemsize */
    (destructor) __pyx_FusedFunction_dealloc, /* tp_dealloc */
    0, /* tp_print (older CPython) / tp_vectorcall_offset (3.8+) -- version-dependent slot */
    0, /* tp_getattr */
    0, /* tp_setattr */
#if PY_MAJOR_VERSION < 3
    0, /* tp_compare */
#else
    0, /* tp_as_async */
#endif
    0, /* tp_repr */
    0, /* tp_as_number */
    0, /* tp_as_sequence */
    &__pyx_FusedFunction_mapping_methods, /* tp_as_mapping: func[sig] lookup */
    0, /* tp_hash */
    (ternaryfunc) __pyx_FusedFunction_call, /* tp_call */
    0, /* tp_str */
    0, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */
    0, /* tp_doc */
    (traverseproc) __pyx_FusedFunction_traverse, /* tp_traverse */
    (inquiry) __pyx_FusedFunction_clear, /* tp_clear */
    0, /* tp_richcompare */
    0, /* tp_weaklistoffset */
    0, /* tp_iter */
    0, /* tp_iternext */
    0, /* tp_methods */
    __pyx_FusedFunction_members, /* tp_members */
    __pyx_CyFunction_getsets, /* tp_getset */
    &__pyx_CyFunctionType_type, /* tp_base (replaced with the fetched type in __pyx_FusedFunction_init) */
    0, /* tp_dict */
    __pyx_FusedFunction_descr_get, /* tp_descr_get */
    0, /* tp_descr_set */
    0, /* tp_dictoffset */
    0, /* tp_init */
    0, /* tp_alloc */
    0, /* tp_new */
    0, /* tp_free */
    0, /* tp_is_gc */
    0, /* tp_bases */
    0, /* tp_mro */
    0, /* tp_cache */
    0, /* tp_subclasses */
    0, /* tp_weaklist */
    0, /* tp_del */
    0, /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
    0, /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1
    0, /* tp_vectorcall */
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
    0, /* tp_print (re-added for 3.8 only) */
#endif
};
static int __pyx_FusedFunction_init(void) {
    /* Wire the base class, then register/fetch the type; -1 on failure. */
    __pyx_FusedFunctionType_type.tp_base = __pyx_CyFunctionType;
    __pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type);
    return (__pyx_FusedFunctionType == NULL) ? -1 : 0;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
    /* Decide whether the C source line should appear in tracebacks, as
     * controlled by cython_runtime.cline_in_traceback. Returns c_line to
     * show it, 0 to suppress it. Preserves any in-flight exception. */
    PyObject *use_cline;
    PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict;
#endif
    if (unlikely(!__pyx_cython_runtime)) {
        return c_line;
    }
    /* Stash the pending exception; the attribute lookups below may clobber it. */
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, *cython_runtime_dict,
            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
    } else
#endif
    {
      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
      if (use_cline_obj) {
        /* NOTE(review): PyObject_Not can return -1 on error, which maps to
         * Py_False (suppress) here — looks like deliberate best-effort. */
        use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
        Py_DECREF(use_cline_obj);
      } else {
        PyErr_Clear();
        use_cline = NULL;
      }
    }
    if (!use_cline) {
        c_line = 0;
        /* Attribute missing: default to off and persist that choice. */
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    /* Lower-bound binary search over the sorted cache: returns the index
     * holding code_line, or the position where it should be inserted. */
    int lo = 0, probe = 0, hi = count - 1;
    if (hi >= 0 && code_line > entries[hi].code_line)
        return count;  /* past the last entry */
    while (lo < hi) {
        probe = lo + (hi - lo) / 2;
        if (code_line < entries[probe].code_line) {
            hi = probe;
        } else if (code_line > entries[probe].code_line) {
            lo = probe + 1;
        } else {
            return probe;  /* exact hit */
        }
    }
    /* 'probe' is the last midpoint inspected (0 if the loop never ran). */
    return (code_line <= entries[probe].code_line) ? probe : probe + 1;
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
    /* Look up a cached code object for code_line; returns a NEW reference,
     * or NULL when absent (line 0 and an empty cache always miss). */
    PyCodeObject *found;
    int idx;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries))
        return NULL;
    idx = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(idx >= __pyx_code_cache.count) ||
        unlikely(__pyx_code_cache.entries[idx].code_line != code_line))
        return NULL;
    found = __pyx_code_cache.entries[idx].code_object;
    Py_INCREF(found);
    return found;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    /* Insert (code_line -> code_object) into the sorted global cache,
     * growing the array in chunks of 64. The cache owns a reference to
     * every stored object. Allocation failures are silently ignored —
     * the cache is purely an optimization. */
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First insertion: create the initial 64-slot array. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* Replace an existing entry. BUG FIX: the cache owns its stored
         * references (every other store path INCREFs), so take a reference
         * to the new object before dropping the old one; the original code
         * stored a borrowed pointer here, leaving a dangling entry once the
         * caller released its own reference. */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_INCREF(code_object);
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        /* Full: grow by another 64 slots. */
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* Shift the tail up one slot and insert at the sorted position. */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
        const char *funcname, int c_line,
        int py_line, const char *filename) {
    /* Build a minimal, empty code object whose filename/name/line make a
     * synthetic traceback frame point at the right source location.
     * Returns a new reference, or NULL with an exception set. */
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
#else
    py_srcfile = PyUnicode_FromString(filename);
#endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        /* Embed the generating C file and line in the displayed name. */
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
    }
    else {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
#else
        py_funcname = PyUnicode_FromString(funcname);
#endif
    }
    if (!py_funcname) goto bad;
    py_code = __Pyx_PyCode_New(
        0,                 /* argcount */
        0,                 /* kwonlyargcount */
        0,                 /* nlocals */
        0,                 /* stacksize */
        0,                 /* flags */
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,        /*PyObject *filename,*/
        py_funcname,       /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    /* Append a synthetic frame for (funcname, filename, line) to the
     * current exception's traceback. Code objects are cached under
     * -c_line (C line) or py_line. Best-effort: failures are ignored. */
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    if (c_line) {
        /* May zero c_line if cline_in_traceback is disabled. */
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,            /*PyThreadState *tstate,*/
        py_code,           /*PyCodeObject *code,*/
        __pyx_d,           /*PyObject *globals,*/
        0                  /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    /* Python 2 fallback: route buffer requests for Cython's own array and
     * memoryview types to their getbuffer implementations. */
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
    if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
    if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    /* Python 2 counterpart of __Pyx_GetBuffer. */
    PyObject *obj = view->obj;
    if (!obj) return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if ((0)) {}  /* generated placeholder for type-specific release hooks */
    view->obj = NULL;
    Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
    /* Contiguity test: walk dimensions innermost-first ('F' ascends,
     * anything else descends) and require each stride to equal the
     * accumulated element extent, with no indirect (suboffset) dims. */
    int dim, idx;
    Py_ssize_t expected_stride = mvs.memview->view.itemsize;
    int direction = (order == 'F') ? 1 : -1;
    int first = (order == 'F') ? 0 : ndim - 1;
    for (dim = 0; dim < ndim; dim++) {
        idx = first + direction * dim;
        if (mvs.suboffsets[idx] >= 0 || mvs.strides[idx] != expected_stride)
            return 0;
        expected_stride *= mvs.shape[idx];
    }
    return 1;
}
/* OverlappingSlices */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    /* Compute the half-open [start, end) byte range spanned by the slice:
     * positive strides push the upper bound up, negative strides push the
     * lower bound down. An empty dimension yields a zero-length range. */
    char *lo = slice->data;
    char *hi = slice->data;
    int dim;
    for (dim = 0; dim < ndim; dim++) {
        Py_ssize_t stride = slice->strides[dim];
        Py_ssize_t extent = slice->shape[dim];
        if (extent == 0) {
            *out_start = *out_end = lo;
            return;
        }
        if (stride > 0)
            hi += stride * (extent - 1);
        else
            lo += stride * (extent - 1);
    }
    *out_start = lo;
    *out_end = hi + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    /* Two half-open byte intervals intersect iff each one starts before
     * the other ends. */
    void *a_lo, *a_hi, *b_lo, *b_hi;
    __pyx_get_array_memory_extents(slice1, &a_lo, &a_hi, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &b_lo, &b_hi, ndim, itemsize);
    return (a_lo < b_hi) && (b_lo < a_hi);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    /* Wrap a raw pointer for Python-level transport: PyCapsule on 2.7+,
     * the legacy PyCObject before that. */
#if PY_VERSION_HEX >= 0x02070000
    return PyCapsule_New(p, sig, NULL);
#else
    return PyCObject_FromVoidPtr(p, NULL);
#endif
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    /* Runtime byte-order probe: on a little-endian machine the least
     * significant byte (0x04) of 0x01020304 is stored at offset 0. */
    union {
        uint32_t whole;
        uint8_t bytes[4];
    } probe;
    probe.whole = 0x01020304;
    return probe.bytes[0] == 4;
}
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    /* Prepare a format-string checking context whose expected layout is
     * 'type'; 'stack' is caller-provided storage for the struct-nesting
     * stack. Packing mode defaults to native ('@'). */
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    /* Descend through struct ('S') wrappers to the first leaf type. */
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    /* Parse a non-negative decimal run at *ts, advancing *ts past it.
     * Returns -1 (with *ts untouched) when the first char is not a digit. */
    const char *cursor = *ts;
    int value;
    if (*cursor < '0' || *cursor > '9')
        return -1;
    value = 0;
    do {
        value = value * 10 + (*cursor++ - '0');
    } while (*cursor >= '0' && *cursor <= '9');
    *ts = cursor;
    return value;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    /* Like ParseNumber, but a missing number sets ValueError.
     * Returns the parsed value, or -1 with an exception set. */
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Set ValueError for a format character the checker does not understand. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    /* Human-readable name of a struct-format type code, for error text. */
    if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
    switch (ch) {
        case '?': return "'bool'";
        case 'c': return "'char'";
        case 'b': return "'signed char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's':
        case 'p': return "a string";
        case 0:   return "end";
        default:  return "unparseable format string";
    }
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    /* Size in bytes of a type code under standard ('=', '<', '>', '!')
     * packing; 0 with an exception set for unknown/unsupported codes. */
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return 2;
        case 'i': case 'I': case 'l': case 'L':
            return 4;
        case 'q': case 'Q':
            return 8;
        case 'f':
            return is_complex ? 8 : 4;
        case 'd':
            return is_complex ? 16 : 8;
        case 'g':
            /* struct-module convention: no standard size for long double */
            PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
            return 0;
        case 'O': case 'P':
            return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    /* Size in bytes of a type code under native ('@'/'^') packing;
     * complex floating codes occupy two components. */
    const size_t components = is_complex ? 2 : 1;
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return sizeof(short);
        case 'i': case 'I':
            return sizeof(int);
        case 'l': case 'L':
            return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q':
            return sizeof(PY_LONG_LONG);
#endif
        case 'f':
            return sizeof(float) * components;
        case 'd':
            return sizeof(double) * components;
        case 'g':
            return sizeof(long double) * components;
        case 'O': case 'P':
            return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Probe structs: sizeof(__Pyx_st_T) - sizeof(T) yields the platform
 * alignment requirement of T (the padding inserted after the leading char). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    /* Native alignment of a type code, derived from the __Pyx_st_* probe
     * structs; 0 with an exception set for unknown codes. */
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I':
            return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L':
            return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q':
            return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f':
            return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd':
            return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g':
            return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'O': case 'P':
            return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably the same as above,
   but we don't have any guarantees.
 */
/* sizeof(__Pyx_pad_T) - sizeof(T) gives the trailing padding a struct
   whose first member is T receives. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    /* Trailing padding of a struct whose first member has this type code,
     * via the __Pyx_pad_* probe structs; 0 + exception for unknown codes. */
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return sizeof(__Pyx_pad_short) - sizeof(short);
        case 'i': case 'I':
            return sizeof(__Pyx_pad_int) - sizeof(int);
        case 'l': case 'L':
            return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q':
            return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f':
            return sizeof(__Pyx_pad_float) - sizeof(float);
        case 'd':
            return sizeof(__Pyx_pad_double) - sizeof(double);
        case 'g':
            return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
        case 'O': case 'P':
            return sizeof(__Pyx_pad_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    /* Classify a format character into Cython's typegroup letters:
     * 'H' char, 'I' signed int, 'U' unsigned int, 'R' real, 'C' complex,
     * 'O' object, 'P' pointer; 0 with an exception for unknown codes. */
    switch (ch) {
        case 'c':
            return 'H';
        case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p':
            return 'I';
        case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
            return 'U';
        case 'f': case 'd': case 'g':
            return is_complex ? 'C' : 'R';
        case 'O':
            return 'O';
        case 'P':
            return 'P';
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
    /* Set a ValueError describing the mismatch between the expected dtype
     * (current stack position) and the encountered format character. */
    if (ctx->head == NULL || ctx->head->field == &ctx->root) {
        /* At top level: expected either the end of input or the root type. */
        const char* expected;
        const char* quote;
        if (ctx->head == NULL) {
            expected = "end";
            quote = "";
        } else {
            expected = ctx->head->field->type->name;
            quote = "'";
        }
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected %s%s%s but got %s",
                     quote, expected, quote,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
    } else {
        /* Inside a struct: name the offending field and its parent. */
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_StructField* parent = (ctx->head - 1)->field;
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                     field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                     parent->type->name, field->name);
    }
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
    /* Consume the pending (enc_type, enc_count) run from the format string,
     * advancing through the expected dtype's fields while verifying size,
     * alignment and offset agreement. Returns 0 on success, -1 with a
     * Python exception set on mismatch. */
    char group;
    size_t size, offset, arraysize = 1;
    if (ctx->enc_type == 0) return 0;  /* nothing pending */
    if (ctx->head->field->type->arraysize[0]) {
        /* Expected field is a C array: validate dimensionality/extents. */
        int i, ndim = 0;
        if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
            /* 's'/'p' strings map onto a 1-D char array of equal length. */
            ctx->is_valid_array = ctx->head->field->type->ndim == 1;
            ndim = 1;
            if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
                PyErr_Format(PyExc_ValueError,
                             "Expected a dimension of size %zu, got %zu",
                             ctx->head->field->type->arraysize[0], ctx->enc_count);
                return -1;
            }
        }
        if (!ctx->is_valid_array) {
            PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                         ctx->head->field->type->ndim, ndim);
            return -1;
        }
        for (i = 0; i < ctx->head->field->type->ndim; i++) {
            arraysize *= ctx->head->field->type->arraysize[i];
        }
        ctx->is_valid_array = 0;
        ctx->enc_count = 1;
    }
    group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
    do {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_TypeInfo* type = field->type;
        /* Native packing uses C sizes; standard packing uses fixed sizes. */
        if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
            size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
        } else {
            size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
        }
        if (ctx->enc_packmode == '@') {
            /* '@' packing additionally honours natural alignment padding. */
            size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
            size_t align_mod_offset;
            if (align_at == 0) return -1;
            align_mod_offset = ctx->fmt_offset % align_at;
            if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
            if (ctx->struct_alignment == 0)
                ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                       ctx->is_complex);
        }
        if (type->size != size || type->typegroup != group) {
            if (type->typegroup == 'C' && type->fields != NULL) {
                /* Complex expected: descend into its (real, imag) parts. */
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                ++ctx->head;
                ctx->head->field = type->fields;
                ctx->head->parent_offset = parent_offset;
                continue;
            }
            if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
                /* char-group mismatch of identical size is tolerated */
            } else {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return -1;
            }
        }
        offset = ctx->head->parent_offset + field->offset;
        if (ctx->fmt_offset != offset) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                         (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
            return -1;
        }
        ctx->fmt_offset += size;
        if (arraysize)
            ctx->fmt_offset += (arraysize - 1) * size;
        --ctx->enc_count;
        /* Advance to the next expected field, popping/pushing the stack at
         * struct boundaries. */
        while (1) {
            if (field == &ctx->root) {
                /* Consumed the whole dtype: any leftover count is an error. */
                ctx->head = NULL;
                if (ctx->enc_count != 0) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return -1;
                }
                break;
            }
            ctx->head->field = ++field;
            if (field->type == NULL) {
                /* End of this struct's field list: pop to the parent. */
                --ctx->head;
                field = ctx->head->field;
                continue;
            } else if (field->type->typegroup == 'S') {
                /* Nested struct: push its first (non-empty) field. */
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                if (field->type->fields->type == NULL) continue;
                field = field->type->fields;
                ++ctx->head;
                ctx->head->field = field;
                ctx->head->parent_offset = parent_offset;
                break;
            } else {
                break;
            }
        }
    } while (ctx->enc_count);
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    /* Parse a "(d0,d1,...)" array-dimension suffix at *tsp, checking each
     * extent against the expected dtype's arraysize. On success, advances
     * *tsp past ')' and returns Py_None (borrowed sentinel); on failure,
     * returns NULL with a ValueError set. */
    const char *ts = *tsp;
    int i = 0, number, ndim;
    ++ts;  /* skip '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    ndim = ctx->head->field->type->ndim;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUG FIX: the original 'continue' did not advance ts, so any
             * whitespace inside the dimension list looped forever. Consume
             * the character before re-testing the loop condition. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;
            default: break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                                "Expected a dimension of size %zu, got %d",
                                ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;  /* step past ')' */
    return Py_None;
}
/* Walk a PEP-3118 / struct-module format string `ts` and validate it
 * against the dtype description held in `ctx`.  Returns a pointer one past
 * the consumed portion (or `ts` at NUL) on success, NULL with a Python
 * exception set on mismatch.  NOTE(review): auto-generated Cython code;
 * only comments were added, every token is unchanged. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;  /* set while a 'Z' (complex) prefix is pending */
    while (1) {
        switch(*ts) {
        case 0:
            /* End of string: flush the pending chunk and require that no
             * unterminated struct scope remains. */
            if (ctx->enc_type != 0 && ctx->head == NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            if (ctx->head != NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            return ts;
        case ' ':
        case '\r':
        case '\n':
            /* Insignificant whitespace between codes. */
            ++ts;
            break;
        case '<':
            /* Explicit little-endian: only valid on little-endian hosts;
             * recorded as standard ('=') packing. */
            if (!__Pyx_Is_Little_Endian()) {
                PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
                return NULL;
            }
            ctx->new_packmode = '=';
            ++ts;
            break;
        case '>':
        case '!':
            /* Explicit big-endian: only valid on big-endian hosts. */
            if (__Pyx_Is_Little_Endian()) {
                PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
                return NULL;
            }
            ctx->new_packmode = '=';
            ++ts;
            break;
        case '=':
        case '@':
        case '^':
            /* Packing / alignment mode markers are remembered verbatim. */
            ctx->new_packmode = *ts++;
            break;
        case 'T':
        {
            /* Nested struct: 'T{...}'.  The sub-format is re-parsed once
             * per repeat count. */
            const char* ts_after_sub;
            size_t i, struct_count = ctx->new_count;
            size_t struct_alignment = ctx->struct_alignment;
            ctx->new_count = 1;
            ++ts;
            if (*ts != '{') {
                PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
                return NULL;
            }
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            ctx->enc_type = 0;
            ctx->enc_count = 0;
            ctx->struct_alignment = 0;
            ++ts;
            ts_after_sub = ts;
            /* NOTE(review): each iteration restarts from `ts`, i.e. the
             * same sub-format is checked struct_count times. */
            for (i = 0; i != struct_count; ++i) {
                ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
                if (!ts_after_sub) return NULL;
            }
            ts = ts_after_sub;
            if (struct_alignment) ctx->struct_alignment = struct_alignment;
        }
            break;
        case '}':
        {
            /* End of nested struct: flush and pad the running offset up to
             * the struct's alignment. */
            size_t alignment = ctx->struct_alignment;
            ++ts;
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            ctx->enc_type = 0;
            if (alignment && ctx->fmt_offset % alignment) {
                ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
            }
        }
            return ts;
        case 'x':
            /* Padding bytes: advance the offset, no field consumed. */
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            ctx->fmt_offset += ctx->new_count;
            ctx->new_count = 1;
            ctx->enc_count = 0;
            ctx->enc_type = 0;
            ctx->enc_packmode = ctx->new_packmode;
            ++ts;
            break;
        case 'Z':
            /* Complex prefix: must be followed by a float code. */
            got_Z = 1;
            ++ts;
            if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
                __Pyx_BufFmt_RaiseUnexpectedChar('Z');
                return NULL;
            }
            CYTHON_FALLTHROUGH;
        case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
        case 'l': case 'L': case 'q': case 'Q':
        case 'f': case 'd': case 'g':
        case 'O': case 'p':
            /* Same code as the chunk in progress: just extend its count. */
            if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
                    (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
                ctx->enc_count += ctx->new_count;
                ctx->new_count = 1;
                got_Z = 0;
                ++ts;
                break;
            }
            CYTHON_FALLTHROUGH;
        case 's':
            /* New code: flush the previous chunk and start a fresh one. */
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            ctx->enc_count = ctx->new_count;
            ctx->enc_packmode = ctx->new_packmode;
            ctx->enc_type = *ts;
            ctx->is_complex = got_Z;
            ++ts;
            ctx->new_count = 1;
            got_Z = 0;
            break;
        case ':':
            /* Skip a ":name:" field annotation. */
            ++ts;
            while(*ts != ':') ++ts;
            ++ts;
            break;
        case '(':
            if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
            break;
        default:
        {
            /* Anything else must be a repeat count for the next code. */
            int number = __Pyx_BufFmt_ExpectNumber(&ts);
            if (number == -1) return NULL;
            ctx->new_count = (size_t)number;
        }
        }
    }
}
/* TypeInfoCompare */
/* Structural equality of two dtype descriptions.  Returns 1 when they are
 * interchangeable, 0 otherwise.  'H' (bytes-like) typegroups compare by
 * size alone; 'S' (struct) typegroups recurse into their field lists. */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    int idx;

    if (!a || !b)
        return 0;
    if (a == b)
        return 1;

    if (a->size != b->size || a->typegroup != b->typegroup ||
            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        /* Byte-group types only need matching item size. */
        if (a->typegroup == 'H' || b->typegroup == 'H')
            return a->size == b->size;
        return 0;
    }

    /* ndim already matches; compare every declared dimension. */
    for (idx = 0; idx < a->ndim; idx++) {
        if (a->arraysize[idx] != b->arraysize[idx])
            return 0;
    }

    if (a->typegroup != 'S')
        return 1;

    /* Struct types: flags and the full field list must agree. */
    if (a->flags != b->flags)
        return 0;
    if (!a->fields && !b->fields)
        return 1;
    if (!(a->fields && b->fields))
        return 0;

    for (idx = 0; a->fields[idx].type && b->fields[idx].type; idx++) {
        __Pyx_StructField *fa = a->fields + idx;
        __Pyx_StructField *fb = b->fields + idx;
        if (fa->offset != fb->offset || !__pyx_typeinfo_cmp(fa->type, fb->type))
            return 0;
    }
    /* Equal only if both field lists ended at the same index. */
    return !a->fields[idx].type && !b->fields[idx].type;
}
/* MemviewSliceValidateAndInit */
/* Validate the stride of `buf` in dimension `dim` against the access spec
 * (__Pyx_MEMVIEW_* flags) requested by the memoryview type.  Returns 1 if
 * compatible, 0 with a Python exception set otherwise. */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    /* A dimension of extent 0 or 1 places no constraint on its stride. */
    if (buf->shape[dim] <= 1)
        return 1;
    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                /* Indirect-contiguous: elements of this dim are pointers. */
                if (unlikely(buf->strides[dim] != sizeof(void *))) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (unlikely(buf->strides[dim] != buf->itemsize)) {
                /* Direct-contiguous: stride must equal the item size. */
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            /* 'Follow' dims only require |stride| >= itemsize. */
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (unlikely(stride < buf->itemsize)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        /* No strides array: the buffer is C-contiguous by convention, so
         * only the last dimension may be declared contiguous and nothing
         * may be indirect. */
        if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (unlikely(buf->suboffsets)) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Validate the suboffset of `buf` in dimension `dim` against the access
 * spec: DIRECT dims must not carry a suboffset, PTR dims must.  Returns 1
 * on success, 0 with a Python exception set otherwise. */
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    if ((spec & __Pyx_MEMVIEW_DIRECT) &&
            unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer not compatible with direct access "
                     "in dimension %d.", dim);
        return 0;
    }
    if ((spec & __Pyx_MEMVIEW_PTR) &&
            unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer is not indirectly accessible "
                     "in dimension %d.", dim);
        return 0;
    }
    return 1;
}
/* Verify that `buf` is Fortran- or C-contiguous as demanded by
 * `c_or_f_flag`, by accumulating the expected stride dimension by
 * dimension (dims of extent <= 1 are exempt).  Returns 1 on success,
 * 0 with a Python exception set otherwise. */
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int dim;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        /* Fortran order: strides grow from the first dimension. */
        Py_ssize_t expected = 1;
        for (dim = 0; dim < ndim; dim++) {
            if (unlikely(expected * buf->itemsize != buf->strides[dim] && buf->shape[dim] > 1)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        /* C order: strides grow from the last dimension. */
        Py_ssize_t expected = 1;
        for (dim = ndim - 1; dim >= 0; dim--) {
            if (unlikely(expected * buf->itemsize != buf->strides[dim] && buf->shape[dim] > 1)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    }
    return 1;
}
/* Validate `original_obj` against the axis specs / dtype of a typed
 * memoryview and initialize `memviewslice` from it.  Reuses the object
 * directly when it is already a memoryview with a matching dtype,
 * otherwise wraps it in a fresh memoryview (released again on failure).
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE(review): auto-generated; the goto-based cleanup and refcount order
 * are kept byte-identical. */
static int __Pyx_ValidateAndInit_memviewslice(
        int *axes_specs,
        int c_or_f_flag,
        int buf_flags,
        int ndim,
        __Pyx_TypeInfo *dtype,
        __Pyx_BufFmt_StackElem stack[],
        __Pyx_memviewslice *memviewslice,
        PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                      original_obj)->typeinfo)) {
        /* Existing memoryview with the right dtype: borrow it as-is. */
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        /* Otherwise acquire a new buffer view; new_memview marks it as
         * owned here so the fail path can release it. */
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (unlikely(buf->ndim != ndim)) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* Only freshly-acquired buffers need their format string checked. */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
    }
    if (unlikely((unsigned) buf->itemsize != dtype->size)) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    if (buf->len > 0) {
        /* Empty buffers skip stride/suboffset/contiguity validation. */
        for (i = 0; i < ndim; i++) {
            spec = axes_specs[i];
            if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
                goto fail;
            if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
                goto fail;
        }
        if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
            goto fail;
    }
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 2-D C-contiguous float memoryview slice.  Py_None is
 * passed through as a None slice; on validation failure the returned
 * slice has NULL memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
    } else if (unlikely(__Pyx_ValidateAndInit_memviewslice(
                            axes_specs, __Pyx_IS_C_CONTIG,
                            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
                            &__Pyx_TypeInfo_float, stack, &result, obj) == -1)) {
        result.memview = NULL;
        result.data = NULL;
    }
    return result;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 2-D C-contiguous double memoryview slice.  Py_None is
 * passed through as a None slice; on validation failure the returned
 * slice has NULL memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
    } else if (unlikely(__Pyx_ValidateAndInit_memviewslice(
                            axes_specs, __Pyx_IS_C_CONTIG,
                            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
                            &__Pyx_TypeInfo_double, stack, &result, obj) == -1)) {
        result.memview = NULL;
        result.data = NULL;
    }
    return result;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 1-D C-contiguous float memoryview slice.  Py_None is
 * passed through as a None slice; on validation failure the returned
 * slice has NULL memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_float(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
    } else if (unlikely(__Pyx_ValidateAndInit_memviewslice(
                            axes_specs, __Pyx_IS_C_CONTIG,
                            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
                            &__Pyx_TypeInfo_float, stack, &result, obj) == -1)) {
        result.memview = NULL;
        result.data = NULL;
    }
    return result;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 1-D C-contiguous double memoryview slice.  Py_None is
 * passed through as a None slice; on validation failure the returned
 * slice has NULL memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
    } else if (unlikely(__Pyx_ValidateAndInit_memviewslice(
                            axes_specs, __Pyx_IS_C_CONTIG,
                            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
                            &__Pyx_TypeInfo_double, stack, &result, obj) == -1)) {
        result.memview = NULL;
        result.data = NULL;
    }
    return result;
}
/* CIntToPy */
/* Convert a C `long` to a new Python integer object.
 * NOTE(review): instantiated from a template shared by several integer
 * widths, hence tautological comparisons such as
 * `sizeof(long) < sizeof(long)`; the compiler folds them away. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    /* Compile-time signedness probe for the source type. */
    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Fallback for exotic widths: build from the raw bytes, honoring
         * the host's endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* MemviewDtypeToObject */
/* Box the float stored at `itemp` as a Python float (new reference,
 * NULL on allocation failure). */
static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp) {
    const float v = *(const float *) itemp;
    return PyFloat_FromDouble(v);
}
/* Store `obj`, converted to float, at `itemp`.  Returns 1 on success and
 * 0 when the conversion raised (a -1 result alone is ambiguous, so the
 * error flag is consulted). */
static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj) {
    const float v = __pyx_PyFloat_AsFloat(obj);
    if ((v == (float)-1) && PyErr_Occurred()) {
        return 0;
    }
    *(float *) itemp = v;
    return 1;
}
/* MemviewDtypeToObject */
/* Box the double stored at `itemp` as a Python float (new reference,
 * NULL on allocation failure). */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
    const double v = *(const double *) itemp;
    return PyFloat_FromDouble(v);
}
/* Store `obj`, converted to double, at `itemp`.  Returns 1 on success and
 * 0 when the conversion raised (a -1 result alone is ambiguous, so the
 * error flag is consulted). */
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
    const double v = __pyx_PyFloat_AsDouble(obj);
    if ((v == (double)-1) && PyErr_Occurred()) {
        return 0;
    }
    *(double *) itemp = v;
    return 1;
}
/* MemviewSliceCopyTemplate */
/* Allocate a new contiguous array with the shape of `from_mvs` and copy
 * the slice's contents into it.  `mode` selects "c"/"fortran" layout for
 * the new array.  Returns the new slice by value; on failure its
 * memview/data are NULL and a Python exception is set.
 * NOTE(review): auto-generated; refcount order kept byte-identical. */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    /* Indirect (pointer-carrying) dimensions cannot be flat-copied. */
    for (i = 0; i < ndim; i++) {
        if (unlikely(from_mvs->suboffsets[i] >= 0)) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* Build a shape tuple for the new array from the slice extents. */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            /* PyTuple_SET_ITEM steals the reference; clear temp_int so the
             * cleanup path does not double-decref it. */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
        (PyObject *) array_obj, contig_flag,
        dtype_is_object,
        from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    /* The new slice takes ownership of memview_obj (last arg = 1). */
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;
    goto no_fail;
fail:
    __Pyx_XDECREF(new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF(array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}
/* BytesContains */
/* Return non-zero iff `character` occurs in the bytes object `bytes`.
 * Assumes `bytes` is a PyBytes instance (macro accessors, no checks). */
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) {
    const char *data = PyBytes_AS_STRING(bytes);
    const Py_ssize_t n = PyBytes_GET_SIZE(bytes);
    return memchr(data, (unsigned char) character, (size_t) n) != NULL;
}
/* ImportNumPyArray */
/* Import numpy and fetch its `ndarray` attribute.  Returns a new
 * reference to the ndarray type object, or Py_None (with the import /
 * attribute error cleared) when numpy is unavailable or the attribute is
 * not a type. */
static PyObject* __Pyx__ImportNumPyArray(void) {
    PyObject *numpy_module, *ndarray_object = NULL;
    numpy_module = __Pyx_Import(__pyx_n_s_numpy, NULL, 0);
    if (likely(numpy_module)) {
        ndarray_object = PyObject_GetAttrString(numpy_module, "ndarray");
        Py_DECREF(numpy_module);
    }
    if (unlikely(!ndarray_object)) {
        /* Missing numpy is not an error for the caller; swallow it. */
        PyErr_Clear();
    }
    if (unlikely(!ndarray_object || !PyObject_TypeCheck(ndarray_object, &PyType_Type))) {
        /* Fall back to None so isinstance checks simply never match. */
        Py_XDECREF(ndarray_object);
        Py_INCREF(Py_None);
        ndarray_object = Py_None;
    }
    return ndarray_object;
}
/* Lazily resolve and cache numpy.ndarray (or Py_None when numpy is not
 * installed).  Always returns a new reference to the cached object. */
static CYTHON_INLINE PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void) {
    if (unlikely(__pyx_numpy_ndarray == NULL)) {
        __pyx_numpy_ndarray = __Pyx__ImportNumPyArray();
    }
    Py_INCREF(__pyx_numpy_ndarray);
    return __pyx_numpy_ndarray;
}
/* CIntFromPyVerify */
/* Helper macros for the __Pyx_PyInt_As_* converters below.  They evaluate
 * `func_value` as `func_type`, verify the result round-trips through
 * `target_type`, and `return` it from the enclosing function.  On
 * overflow they jump to the caller-provided `raise_overflow` /
 * `raise_neg_overflow` labels (and rely on a local `is_unsigned`).  The
 * `_EXC` variant additionally propagates a pending Python exception
 * signalled by a -1 result before treating -1 as a numeric value. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* CIntFromPy */
/* Convert a Python object to a C `int`, raising OverflowError when the
 * value does not fit and TypeError for non-numbers.  Returns -1 with an
 * exception set on error (-1 without an exception is a valid result).
 * NOTE(review): auto-generated from a per-type template; the digit-level
 * fast paths read CPython's PyLong internals directly. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    /* Compile-time signedness probe for the target type. */
    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        /* Python 2 int: a plain C long. */
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Fast path: assemble the value straight from the PyLong
             * digits for small magnitudes (up to 4 digits). */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            /* Negative PyLong into an unsigned target: reject. */
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable negativity check via x < False. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Signed fast path: negative sizes denote negative values. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Last resort for exotic widths: copy the raw bytes out. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int: coerce via __index__/__int__ and retry once. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to int");
    return (int) -1;
}
/* CIntFromPy */
/* Convert a Python object to a C `long`, raising OverflowError when the
 * value does not fit and TypeError for non-numbers.  Returns -1 with an
 * exception set on error (-1 without an exception is a valid result).
 * NOTE(review): auto-generated twin of __Pyx_PyInt_As_int above. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    /* Compile-time signedness probe for the target type. */
    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        /* Python 2 int: a plain C long. */
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Fast path: assemble the value straight from the PyLong
             * digits for small magnitudes (up to 4 digits). */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            /* Negative PyLong into an unsigned target: reject. */
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable negativity check via x < False. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Signed fast path: negative sizes denote negative values. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Last resort for exotic widths: copy the raw bytes out. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* Not an int: coerce via __index__/__int__ and retry once. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to long");
    return (long) -1;
}
/* CIntToPy */
/* Convert a C `int` to a new Python integer object.
 * NOTE(review): auto-generated twin of __Pyx_PyInt_From_long; the sizeof
 * chains are resolved at compile time. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    /* Compile-time signedness probe for the source type. */
    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Fallback for exotic widths: build from the raw bytes, honoring
         * the host's endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert a Python integer object to a C char, raising OverflowError for
   out-of-range values.  Returns (char)-1 on error with a Python exception
   set; because (char)-1 is also a legal result, callers must distinguish
   errors via PyErr_Occurred().  Cython-generated utility code: the
   __PYX_VERIFY_RETURN_INT* macros perform the range check and either return
   the converted value or jump to the raise_* labels below. */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
/* neg_one > const_zero iff "char" is unsigned on this platform
   (char signedness is implementation-defined) */
const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
/* Python 2 fast path: PyInt objects hold a C long directly */
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
/* Fast path: read the PyLong digit array directly (CPython internals).
   Py_SIZE(x) is the signed digit count; its sign is the number's sign. */
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
/* Unsigned target cannot hold a negative value */
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
/* Non-CPython: detect negativity portably via x < False (i.e. x < 0) */
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
/* Signed target: same digit-reading fast path, handling negative sizes */
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
/* Last resort for values wider than long long: byte-array extraction */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
/* Not an int/long: coerce via __int__/__index__, then retry recursively */
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
/* Emit a warning (via PyErr_WarnEx) when the running interpreter's
 * "major.minor" version differs from the one this module was compiled
 * against.  Returns 0 on match, else the result of PyErr_WarnEx().
 *
 * Fix: the previous implementation formatted both versions into 4-byte
 * buffers, truncating "3.10" to "3.1"; Python 3.1 and 3.10 therefore
 * compared equal, and any two-digit minor version was checked incorrectly.
 * We now compare the full compile-time "major.minor" string as a prefix of
 * Py_GetVersion(), and require that the runtime version does not continue
 * with a further digit (so "3.1" does not match a "3.10" runtime).  This
 * mirrors the upstream Cython fix. */
static int __Pyx_check_binary_version(void) {
    char ctversion[5];
    int same = 1, i, found_dot;
    const char *rt_from_call = Py_GetVersion();
    PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    found_dot = 0;
    for (i = 0; i < 4; i++) {
        if (!ctversion[i]) {
            /* Compile-time string exhausted: versions match only if the
             * runtime string does not continue with another digit. */
            same = (rt_from_call[i] < '0' || rt_from_call[i] > '9');
            break;
        }
        if (rt_from_call[i] != ctversion[i]) {
            same = 0;
            break;
        }
    }
    if (!same) {
        char rtversion[5] = {'\0'};
        char message[200];
        /* Extract the runtime "major.minor" prefix (digits and one dot). */
        for (i = 0; i < 4; ++i) {
            if (rt_from_call[i] == '.') {
                if (found_dot) break;
                found_dot = 1;
            } else if (rt_from_call[i] < '0' || rt_from_call[i] > '9') {
                break;
            }
            rtversion[i] = rt_from_call[i];
        }
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/* InitStrings */
/* Materialize every compile-time string constant listed in the table t.
   Each __Pyx_StringTabEntry names a target slot (t->p), the raw bytes
   (t->s, length t->n including the NUL), and flags selecting unicode/str/
   bytes handling and interning.  Returns 0 on success, -1 on failure
   (with a Python exception set). */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
/* Python 2: unicode literals decode as UTF-8, str literals may intern */
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
/* Python 3: both unicode and str flags yield a str object */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
/* Pre-compute and cache the hash; also fails early on unhashable input */
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
/* Build a Python unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const size_t n_bytes = strlen(c_str);
    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)n_bytes);
}
/* Convenience wrapper: string data of o, discarding the length. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t length_unused;
    return __Pyx_PyObject_AsStringAndSize(o, &length_unused);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
/* Return a borrowed char* view of a unicode object plus its byte length.
   Two variants: a legacy (pre-PEP-393) path using the cached default-encoded
   bytes object, and a PEP-393 path using the UTF-8 cache.  Under the ASCII
   default-encoding build, non-ASCII content raises UnicodeEncodeError
   (triggered deliberately via PyUnicode_AsASCIIString) and NULL is returned. */
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
/* Borrow the cached default-encoded representation (owned by o) */
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
/* Scan for any byte >= 128; if found, raise the proper ASCII error */
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
/* Ensure the compact PEP-393 representation is available */
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
/* ASCII strings: code-point count equals byte count */
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
/* Raise UnicodeEncodeError for non-ASCII content */
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
/* Return a borrowed char* view of o's data and its length, accepting
   unicode (when the default-encoding build allows), bytearray, and bytes.
   Returns NULL with an exception set on failure. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
/* Py2 ASCII build: only take this path if sys.getdefaultencoding()
   was verified to be non-ASCII at module init */
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
/* Fallback: treat as bytes; PyBytes_AsStringAndSize sets TypeError
   for non-bytes objects */
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
/* Truth-test x, short-circuiting the three singletons (True/False/None)
   before falling back to the generic protocol.  Returns 1, 0, or -1 on
   error, like PyObject_IsTrue(). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True) return 1;
    if ((x == Py_False) | (x == Py_None)) return 0;
    return PyObject_IsTrue(x);
}
/* Truth-test x and release the reference the caller passed in.
   A NULL argument (propagated error) yields -1 without touching refcounts. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
    int truth;
    if (unlikely(!x))
        return -1;
    truth = __Pyx_PyObject_IsTrue(x);
    Py_DECREF(x);
    return truth;
}
/* Handle an __int__/__index__ result of the wrong type.  On Python 3, a
   strict int subclass is accepted with a DeprecationWarning (matching
   CPython's own behavior); anything else raises TypeError.  Consumes the
   reference to result on the error paths and returns NULL, or returns
   result (ownership passed back to the caller) when accepted. */
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
/* int subclass: warn; if warnings are errors, PyErr_WarnFormat fails
   and we discard the result */
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
/* type_name is "int" or "long" (4 chars max), hence the %.4s format */
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
/* Coerce x to a Python integer (new reference), like PyNumber_Long but
   with a fast path for objects that are already int/long and stricter
   result-type checking.  Returns NULL with an exception set on failure. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
/* Already an integer: just return a new reference */
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
/* Call the nb_int (or, on Py2, nb_long) slot directly */
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
/* No type-slot access (e.g. limited API): use the generic protocol,
   excluding str/bytes which PyNumber_Int would happily parse */
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
/* Verify the slot actually returned an integer */
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
/* Convert b to a Py_ssize_t via the index protocol.  Fast paths read
   small PyLong digit arrays directly; larger or non-integer values fall
   back to PyLong_AsSsize_t / PyNumber_Index.  Returns -1 on error with an
   exception set (callers must check PyErr_Occurred() since -1 is valid). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
/* Read digits directly; Py_SIZE(b) is the signed digit count */
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
/* 0 or a single digit: always fits in Py_ssize_t */
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
/* Multi-digit values, only when they provably fit the target width */
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
/* Generic path: ask the object for its __index__ */
x = PyNumber_Index(b);
if (!x) return -1;
/* NOTE(review): on Py3 PyInt_AsSsize_t is presumably a Cython compat
   alias for PyLong_AsSsize_t — defined elsewhere in the generated file */
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
/* Map a C long truth value onto the Py_True/Py_False singletons,
   returning a new reference. */
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
    if (b)
        return __Pyx_NewRef(Py_True);
    return __Pyx_NewRef(Py_False);
}
/* Build a Python integer from a size_t (new reference).
   NOTE(review): PyInt_FromSize_t is presumably a Cython compat macro on
   Python 3 (mapping to PyLong_FromSize_t) — defined elsewhere in this
   generated file. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
nco_s1d.c | /* $Header$ */
/* Purpose: NCO utilities for Sparse-1D (S1D) datasets */
/* Copyright (C) 2015--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License with exceptions described in the LICENSE file */
#include "nco_s1d.h" /* Sparse-1D datasets */
const char * /* O [sng] String describing sparse-type */
nco_s1d_sng /* [fnc] Convert sparse-1D type enum to string */
(const nco_s1d_typ_enm nco_s1d_typ) /* I [enm] Sparse-1D type enum */
{
  /* Purpose: Return human-readable label for given sparse-1D type */
  if(nco_s1d_typ == nco_s1d_clm) return "Sparse Column (cols1d) format";
  if(nco_s1d_typ == nco_s1d_grd) return "Sparse Gridcell (grid1d) format";
  if(nco_s1d_typ == nco_s1d_lnd) return "Sparse Landunit (land1d) format";
  if(nco_s1d_typ == nco_s1d_pft) return "Sparse PFT (pfts1d) format";
  /* Unrecognized enum value: report generic internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* !nco_s1d_sng() */
int /* O [rcd] Return code */
nco_s1d_unpack /* [fnc] Unpack sparse-1D CLM/ELM variables into full file */
(rgr_sct * const rgr, /* I/O [sct] Regridding structure */
trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */
{
/* Purpose: Read sparse CLM/ELM input file, inflate and write into output file */
/* Usage:
ncks -O -C --s1d -v cols1d_topoglc ~/data/bm/elm_mali_rst.nc ~/foo.nc
ncks -O -C --s1d -v cols1d_topoglc --hrz=${DATA}/bm/elm_mali_ig_hst.nc ${DATA}/bm/elm_mali_rst.nc ~/foo.nc */
const char fnc_nm[]="nco_s1d_unpack()"; /* [sng] Function name */
char var_nm[NC_MAX_NAME+1L];
char *fl_in;
char *fl_out;
char *fl_tpl; /* [sng] Template file (contains horizontal grid) */
char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
char *grd_nm_in=(char *)strdup("gridcell");
char *lnd_nm_in=(char *)strdup("landunit");
char *clm_nm_in=(char *)strdup("column");
char *pft_nm_in=(char *)strdup("pft");
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int in_id; /* I [id] Input netCDF file ID */
int md_open; /* [enm] Mode flag for nc_open() call */
int out_id; /* I [id] Output netCDF file ID */
int rcd=NC_NOERR;
int tpl_id; /* [id] Input netCDF file ID (for horizontal grid template) */
int dmn_idx; /* [idx] Dimension index */
/* Initialize local copies of command-line values */
dfl_lvl=rgr->dfl_lvl;
fl_in=rgr->fl_in;
fl_out=rgr->fl_out;
in_id=rgr->in_id;
out_id=rgr->out_id;
/* Search for horizontal grid */
char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */
char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */
int dmn_id_col_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lat_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lon_in=NC_MIN_INT; /* [id] Dimension ID */
nco_bool FL_RTR_RMT_LCN;
nco_bool flg_grd_1D=False; /* [flg] Unpacked data are on unstructured (1D) grid */
nco_bool flg_grd_rct=False; /* [flg] Unpacked data are on rectangular (2D) grid */
nco_bool flg_grd_dat=False; /* [flg] Use horizontal grid from required input data file */
nco_bool flg_grd_tpl=False; /* [flg] Use horizontal grid from optional horizontal grid template file */
/* Does data file have unstructured grid?
MB: Routine must handle two semantically distinct meanings of "column":
1. The horizontal dimension in an unstructured grid
2. A fraction of a landunit, which is a fraction of a CTSM/ELM gridcell
In particular, a column is a fraction of a vegetated, urban, glacier, or crop landunit
This routine distinguishes these meanings by abbreviating (1) as "col" and (2) as "clm"
This usage maintains the precedent that "col" is the horizontal unstructured dimension in nco_rgr.c
It is necessary though unintuitive that "cols1d" variable metadata will use the "clm" abbreviation */
if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */
if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True;
/* Does data file have RLL grid? */
if(!flg_grd_1D){
if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */
if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */
} /* !flg_grd_1D */
if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_rct=True;
/* Set where to obtain horizontal grid */
if(flg_grd_1D || flg_grd_rct) flg_grd_dat=True; else flg_grd_tpl=True;
if(flg_grd_tpl && !rgr->fl_hrz){
(void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file and no optional horizontal gridfile was provided.\nHINT: Use option --hrz to specify file with horizontal grid used by input data.\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !flg_grd_tpl */
/* Open grid template file iff necessary */
if(flg_grd_tpl && rgr->fl_hrz){
char *fl_pth_lcl=NULL;
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
/* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */
fl_tpl=(char *)strdup(rgr->fl_hrz);
/* Make sure file is on local system and is readable or die trying */
fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id);
/* Same logic used to search for grid in data file and to search for grid in template file...
Does template file have unstructured grid? */
if(col_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(tpl_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */
if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True;
/* Does template file have RLL grid? */
if(!flg_grd_1D){
if(lat_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(tpl_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */
if(lon_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(tpl_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */
} /* !flg_grd_1D */
if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_rct=True;
/* Set where to obtain horizontal grid */
if(!flg_grd_1D && !flg_grd_rct){
(void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file %s or in template file %s.\nHINT: One of those files must contain the grid dimensions and coordinates used by the packed data in the input data file.\n",nco_prg_nm_get(),fnc_nm,fl_in,fl_tpl);
nco_exit(EXIT_FAILURE);
} /* !flg_grd_1D */
} /* !flg_grd_tpl */
int cols1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of column */
int cols1d_ixy_id=NC_MIN_INT; /* [id] Column 2D longitude index */
int cols1d_jxy_id=NC_MIN_INT; /* [id] Column 2D latitude index */
int cols1d_lat_id=NC_MIN_INT; /* [id] Column latitude */
int cols1d_lon_id=NC_MIN_INT; /* [id] Column longitude */
int grid1d_ixy_id=NC_MIN_INT; /* [id] Gridcell 2D longitude index */
int grid1d_jxy_id=NC_MIN_INT; /* [id] Gridcell 2D latitude index */
int grid1d_lat_id=NC_MIN_INT; /* [id] Gridcell latitude */
int grid1d_lon_id=NC_MIN_INT; /* [id] Gridcell longitude */
int land1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of landunit */
int land1d_ixy_id=NC_MIN_INT; /* [id] Landunit 2D longitude index */
int land1d_jxy_id=NC_MIN_INT; /* [id] Landunit 2D latitude index */
int land1d_lat_id=NC_MIN_INT; /* [id] Landunit latitude */
int land1d_lon_id=NC_MIN_INT; /* [id] Landunit longitude */
int pfts1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of PFT */
int pfts1d_column_index_id=NC_MIN_INT; /* [id] Column index of PFT */
int pfts1d_ixy_id=NC_MIN_INT; /* [id] PFT 2D longitude index */
int pfts1d_jxy_id=NC_MIN_INT; /* [id] PFT 2D latitude index */
int pfts1d_lat_id=NC_MIN_INT; /* [id] PFT latitude */
int pfts1d_lon_id=NC_MIN_INT; /* [id] PFT longitude */
int dmn_id_grd=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lnd=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_clm=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_pft=NC_MIN_INT; /* [id] Dimension ID */
nco_bool flg_s1d_clm=False; /* [flg] Dataset contains sparse variables for columns */
nco_bool flg_s1d_grd=False; /* [flg] Dataset contains sparse variables for gridcells */
nco_bool flg_s1d_lnd=False; /* [flg] Dataset contains sparse variables for landunits */
nco_bool flg_s1d_pft=False; /* [flg] Dataset contains sparse variables for PFTs */
rcd=nco_inq_varid_flg(in_id,"cols1d_gridcell_index",&cols1d_gridcell_index_id);
if(cols1d_gridcell_index_id != NC_MIN_INT) flg_s1d_clm=True;
if(flg_s1d_clm){
rcd=nco_inq_varid(in_id,"cols1d_ixy",&cols1d_ixy_id);
rcd=nco_inq_varid(in_id,"cols1d_jxy",&cols1d_jxy_id);
rcd=nco_inq_varid(in_id,"cols1d_lat",&cols1d_lat_id);
rcd=nco_inq_varid(in_id,"cols1d_lon",&cols1d_lon_id);
} /* !flg_s1d_clm */
rcd=nco_inq_varid_flg(in_id,"grid1d_lat",&grid1d_lat_id);
if(grid1d_lat_id != NC_MIN_INT) flg_s1d_grd=True;
if(flg_s1d_grd){
rcd=nco_inq_varid(in_id,"grid1d_ixy",&grid1d_ixy_id);
rcd=nco_inq_varid(in_id,"grid1d_jxy",&grid1d_jxy_id);
rcd=nco_inq_varid(in_id,"grid1d_lon",&grid1d_lon_id);
} /* !flg_s1d_grd */
rcd=nco_inq_varid_flg(in_id,"land1d_gridcell_index",&land1d_gridcell_index_id);
if(land1d_gridcell_index_id != NC_MIN_INT) flg_s1d_lnd=True;
if(flg_s1d_lnd){
rcd=nco_inq_varid(in_id,"land1d_ixy",&land1d_ixy_id);
rcd=nco_inq_varid(in_id,"land1d_jxy",&land1d_jxy_id);
rcd=nco_inq_varid(in_id,"land1d_lat",&land1d_lat_id);
rcd=nco_inq_varid(in_id,"land1d_lon",&land1d_lon_id);
} /* !flg_s1d_lnd */
rcd=nco_inq_varid_flg(in_id,"pfts1d_gridcell_index",&pfts1d_gridcell_index_id);
if(pfts1d_gridcell_index_id != NC_MIN_INT) flg_s1d_pft=True;
if(flg_s1d_pft){
rcd=nco_inq_varid(in_id,"pfts1d_column_index",&pfts1d_column_index_id);
rcd=nco_inq_varid(in_id,"pfts1d_ixy",&pfts1d_ixy_id);
rcd=nco_inq_varid(in_id,"pfts1d_jxy",&pfts1d_jxy_id);
rcd=nco_inq_varid(in_id,"pfts1d_lat",&pfts1d_lat_id);
rcd=nco_inq_varid(in_id,"pfts1d_lon",&pfts1d_lon_id);
} /* !flg_s1d_pft */
assert(flg_s1d_clm || flg_s1d_lnd || flg_s1d_pft);
if(flg_s1d_clm) rcd=nco_inq_dimid(in_id,clm_nm_in,&dmn_id_clm);
if(flg_s1d_grd) rcd=nco_inq_dimid(in_id,grd_nm_in,&dmn_id_grd);
if(flg_s1d_lnd) rcd=nco_inq_dimid(in_id,lnd_nm_in,&dmn_id_lnd);
if(flg_s1d_pft) rcd=nco_inq_dimid(in_id,pft_nm_in,&dmn_id_pft);
if(nco_dbg_lvl_get() >= nco_dbg_std){
(void)fprintf(stderr,"%s: INFO %s necessary information to unpack cols1d variables\n",nco_prg_nm_get(),flg_s1d_clm ? "Found all" : "Could not find");
(void)fprintf(stderr,"%s: INFO %s necessary information to unpack lnds1d variables\n",nco_prg_nm_get(),flg_s1d_lnd ? "Found all" : "Could not find");
(void)fprintf(stderr,"%s: INFO %s necessary information to unpack pfts1d variables\n",nco_prg_nm_get(),flg_s1d_pft ? "Found all" : "Could not find");
} /* !dbg */
/* Collect other information from data and template files */
int dmn_nbr_in; /* [nbr] Number of dimensions in input file */
int dmn_nbr_out; /* [nbr] Number of dimensions in output file */
int var_nbr; /* [nbr] Number of variables in file */
rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL);
const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */
int var_cpy_nbr=0; /* [nbr] Number of copied variables */
int var_rgr_nbr=0; /* [nbr] Number of unpacked variables */
int var_xcl_nbr=0; /* [nbr] Number of deleted variables */
int var_crt_nbr=0; /* [nbr] Number of created variables */
long idx; /* [idx] Generic index */
unsigned int idx_tbl; /* [idx] Counter for traversal table */
char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */
nco_bool has_clm; /* [flg] Contains column dimension */
nco_bool has_grd; /* [flg] Contains gridcell dimension */
nco_bool has_lnd; /* [flg] Contains landunit dimension */
nco_bool has_pft; /* [flg] Contains PFT dimension */
nco_bool need_clm=False; /* [flg] At least one variable to unpack needs column dimension */
nco_bool need_grd=False; /* [flg] At least one variable to unpack needs gridcell dimension */
nco_bool need_lnd=False; /* [flg] At least one variable to unpack needs landunit dimension */
nco_bool need_pft=False; /* [flg] At least one variable to unpack needs PFT dimension */
trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */
/* Define unpacking flag for each variable */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn;
has_clm=False;
has_grd=False;
has_lnd=False;
has_pft=False;
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
/* Pre-determine flags necessary during next loop */
dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in);
if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in);
if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in);
if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in);
} /* !dmn_idx */
/* Unpack variables that contain a sparse-1D dimension */
if(has_clm || has_lnd || has_pft){
trv_tbl->lst[idx_tbl].flg_rgr=True;
var_rgr_nbr++;
if(has_clm) need_clm=True;
if(has_grd) need_grd=True;
if(has_lnd) need_lnd=True;
if(has_pft) need_pft=True;
} /* endif */
assert(!(has_clm && has_lnd));
assert(!(has_clm && has_pft));
assert(!(has_lnd && has_pft));
/* Copy all variables that are not regridded or omitted */
if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++;
} /* end nco_obj_typ_var */
} /* end idx_tbl */
if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit unpacking criteria. The sparse data unpacker expects at least one variable to unpack, and variables not unpacked are copied straight to output. HINT: If the name(s) of the input sparse-1D dimensions (e.g., \"column\", \"landunit\", and \"pft\") do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"column\", \"landunit\", and/or \"pft\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#sparse. For CTSM/ELM sparse-1D coordinate grids, ensure that the \"column\" and \"landunit\" variable names are known with, e.g., \"ncks --rgr column_nm=clm --rgr landunit_nm=lnd\" or \"ncremap -R '--rgr clm=clm --rgr lnd=lnd'\".\n",nco_prg_nm_get(),fnc_nm);
if(nco_dbg_lvl_get() >= nco_dbg_fl){
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Unpack %s? %s\n",trv.nm,trv.flg_rgr ? "Yes" : "No");
} /* end idx_tbl */
} /* end dbg */
int hrz_id; /* [id] Horizontal grid netCDF file ID */
long col_nbr; /* [nbr] Number of columns */
long lon_nbr; /* [nbr] Number of longitudes */
long lat_nbr; /* [nbr] Number of latitudes */
if(flg_grd_dat) hrz_id=in_id; else hrz_id=tpl_id;
if(flg_grd_1D) rcd=nco_inq_dimlen(hrz_id,dmn_id_col_in,&col_nbr);
if(flg_grd_rct){
rcd=nco_inq_dimlen(hrz_id,dmn_id_lat_in,&lat_nbr);
rcd=nco_inq_dimlen(hrz_id,dmn_id_lon_in,&lon_nbr);
} /* !flg_grd_rct */
if(flg_grd_tpl){
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
/* No further access to template file, close it */
nco_close(tpl_id);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl);
} /* !flg_grd_tpl */
/* Lay-out unpacked file */
char *col_nm_out=NULL;
char *lat_nm_out=NULL;
char *lon_nm_out=NULL;
char *lat_dmn_nm_out;
char *lon_dmn_nm_out;
int dmn_id_col_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lat_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lon_out=NC_MIN_INT; /* [id] Dimension ID */
int col_out_id; /* [id] Variable ID for column */
int lon_out_id; /* [id] Variable ID for longitude */
int lat_out_id; /* [id] Variable ID for latitude */
if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in;
if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in;
if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in;
if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in;
if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in;
/* Define horizontal dimensions before all else */
if(flg_grd_1D){
rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col_out);
} /* !flg_grd_1D */
if(flg_grd_rct){
rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat_out);
rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon_out);
} /* !flg_grd_rct */
/* Pre-allocate dimension ID and cnt/srt space */
int *dmn_id_in=NULL; /* [id] Dimension IDs */
int *dmn_id_out=NULL; /* [id] Dimension IDs */
int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */
long *dmn_cnt_in=NULL;
long *dmn_cnt_out=NULL;
long *dmn_srt=NULL;
rcd+=nco_inq_ndims(in_id,&dmn_nbr_max);
dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
//(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps);
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
dfl_lvl=rgr->dfl_lvl;
fl_out_fmt=rgr->fl_out_fmt;
const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const nc_type crd_typ_out=NC_DOUBLE;
nc_type var_typ_rgr; /* [enm] Variable type used during regridding */
var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */
if(flg_grd_1D){
rcd+=nco_def_var(out_id,col_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&col_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,col_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_grd_1D */
if(flg_grd_rct){
rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat_out,&lat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon_out,&lon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_grd_rct */
/* Free pre-allocated array space */
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in);
if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
/* Unpack and copy data from input file */
int dmn_idx_col=int_CEWI; /* [idx] Index of column dimension */
int dmn_idx_lat=int_CEWI; /* [idx] Index of latitude dimension */
int dmn_idx_lon=int_CEWI; /* [idx] Index of longitude dimension */
int var_id; /* [id] Current variable ID */
long var_sz; /* [nbr] Size of variable */
nc_type var_typ_in; /* [enm] NetCDF type of input data */
nc_type var_typ_out; /* [enm] NetCDF type of data in output file */
nco_s1d_typ_enm nco_s1d_typ; /* [enm] Sparse-1D type of input variable */
ptr_unn var_val_in;
ptr_unn var_val_out;
#ifdef ENABLE_S1D
#ifdef __GNUG__
# pragma omp parallel for firstprivate(has_clm,has_grd,has_lnd,has_pft,var_val_in,var_val_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_col_in,dmn_id_col_out,dmn_id_pft_in,dmn_id_pft_out,dmn_id_tm_in,flg_s1d_clm,flg_s1d_pft,grd_nbr,idx_dbg,col_nbr_in,col_nbr_out,pft_nbr_in,pft_nbr_out,out_id,xtr_mth)
#endif /* !__GNUG__ */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
thr_idx=omp_get_thread_num();
in_id=trv_tbl->in_id_arr[thr_idx];
#ifdef _OPENMP
if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : "");
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm);
#endif /* !_OPENMP */
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm);
if(trv.flg_rgr){
/* Interpolate variable */
if(strstr("cols1d",var_nm)){
nco_s1d_typ=nco_s1d_clm;
}else if(strstr("pfts1d",var_nm)){
nco_s1d_typ=nco_s1d_pft;
}else{
(void)fprintf(stderr,"%s: ERROR %s reports variable %s does not appear to be sparse\n",nco_prg_nm_get(),fnc_nm,var_nm);
nco_exit(EXIT_FAILURE);
} /* !strstr() */
if(nco_dbg_lvl_get() >= nco_dbg_std){
(void)fprintf(stderr,"%s: INFO %s reports variable %s is sparse type %s",nco_prg_nm_get(),fnc_nm,var_nm,nco_s1d_sng(nco_s1d_typ));
} /* !dbg */
}else{ /* !trv.flg_rgr */
/* Use standard NCO copy routine for variables that are not regridded
20190511: Copy them only once */
#pragma omp critical
{ /* begin OpenMP critical */
(void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl);
} /* end OpenMP critical */
} /* !flg_rgr */
} /* !xtr */
} /* end (OpenMP parallel for) loop over idx_tbl */
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n");
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr);
/* Free output data memory */
#endif /* !ENABLE_S1D */
// if(col_nm_in) col_nm_in=(char *)nco_free(col_nm_in);
if(clm_nm_in) clm_nm_in=(char *)nco_free(clm_nm_in);
if(grd_nm_in) grd_nm_in=(char *)nco_free(grd_nm_in);
if(lnd_nm_in) lnd_nm_in=(char *)nco_free(lnd_nm_in);
if(pft_nm_in) pft_nm_in=(char *)nco_free(pft_nm_in);
return rcd;
} /* !nco_s1d_unpack() */
|
bfs_csr_bsp.c | #include "graph_defs.h"
#include "prefetcher.h"
#include <limits.h>
/* Per-vertex BFS bookkeeping, shared between the BFS workers and the
 * prefetch thread (hence the volatile qualifiers). */
typedef struct bfs_metadata_st {
  char touched;                      /* 0 = unvisited, otherwise the 1-based BFS level at which the vertex was discovered */
  volatile unsigned long queue_next; /* next vertex in the software queue walked by the prefetcher (ULONG_MAX = end of queue) */
} bfs_metadata_t;
static volatile unsigned long queue_head = ULONG_MAX; /* head of the BFS software queue, read by the prefetch callback */
static volatile unsigned long vertex_position = 0;    /* vertex currently being expanded; drives sequential prefetch */
static bfs_metadata_t *metadata;  /* one entry per vertex; mmap'ed and zeroed in main() */
static csr_t * volatile graph;    /* CSR graph under traversal, opened in main() */
unsigned long MAX_CACHE = ULONG_MAX; /* look-ahead upper bound, overridable via CMAX env var */
long MIN_CACHE = 0;                  /* look-ahead lower bound, overridable via CMIN env var */
unsigned long visited = 0;           /* global count of vertices touched so far */
/* Prefetcher callback: walk the BFS software queue starting from where the
 * previous invocation stopped and translate each queued vertex's
 * adjacency-list bit offset into page numbers for the prefetch thread.
 *
 * laf      : look-ahead buffer; slot [e] receives a start page and slot
 *            [e + 2*laf_size] apparently receives the page count
 *            -- NOTE(review): layout inferred from usage, confirm against
 *            prefetcher.h
 * laf_size : capacity of laf
 * ift      : maximum number of entries to fill this call
 *
 * Static locals persist the walk position (old_hoq) and how far ahead of
 * the consumer we have run (pf_visited vs. the global visited counter). */
void prefetcher_random_callback(unsigned long *laf, unsigned long laf_size,
    unsigned long ift) {
  static unsigned long old_hoq = ULONG_MAX;
  unsigned long current_hoq = ULONG_MAX;
  static unsigned long ra_depth = 0;
  static char preload = 0;
  static long pf_visited = 0;
  unsigned long entries = 0;
  /* Fill in inner-loop entries from BFS queue */
  /*
   if ((preload == 0) && (ra_depth > MAX_CACHE)) {
   preload = 1;
   current_hoq = ULONG_MAX;
   }
   */
  /* Resume from the previous position; restart from the queue head when we
   * have no position yet or have run more than MIN_CACHE vertices ahead of
   * the BFS consumer. */
  current_hoq = old_hoq;
  if ((current_hoq == ULONG_MAX)
      || (((signed long) (pf_visited - visited)) > MIN_CACHE)/*|| (ra_depth > MIN_CACHE)*/) {
    current_hoq = queue_head;
    pf_visited = visited;
    // ra_depth = 0;
  }
  //	if (((signed long)(pf_visited - visited)) > MIN_CACHE) return;
  /*	if(current_hoq != ULONG_MAX) {
   current_hoq = metadata[current_hoq].queue_next;
   }
   */
  while (entries != ift && current_hoq != ULONG_MAX) {
    /* index[] holds bit offsets into the adjacency list; shift by
     * (PAGE_SHIFT + 3) to convert bits -> bytes -> page number. */
    unsigned long page = graph->index[current_hoq];
    unsigned long end = graph->index[current_hoq + 1];
    page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits ! */
    end = end >> (ASSUME_PAGE_SHIFT + 3);
    //		if(laf[HASH_MODULO(page, laf_size)] != page) {
    //			laf[HASH_MODULO(page, laf_size)] = page;
    //			for (; page <= end; page++) {
    //				if (entries==ift) break;
    laf[entries] = page;
    if (end > page)
      laf[entries + (2 * laf_size)] = end - page;
    entries++;
    //			}
    //		}
    old_hoq = current_hoq;
    current_hoq = metadata[current_hoq].queue_next;
    pf_visited++;
  }
  ra_depth += entries;
}
/* Sequential-prefetch callback: report the page holding the adjacency-list
 * data of the vertex currently being expanded. The aux_offset argument is
 * part of the callback signature but unused here. */
unsigned long prefetcher_sequential_callback(unsigned long* aux_offset) {
  (void) aux_offset;
  /* index[] stores a bit offset; bits -> bytes -> page number. */
  const unsigned long bit_offset = graph->index[vertex_position];
  return bit_offset >> (ASSUME_PAGE_SHIFT + 3);
}
unsigned long alist_entries_seen = 0; /* [stats] adjacency-list entries processed */
// #pragma omp threadprivate(current_vertex)
unsigned long total_queue_demands = 0;   /* [stats] denominator of the F_THRESHOLD ratio printed in main() */
unsigned long queue_above_threshold = 0; /* [stats] numerator of the F_THRESHOLD ratio printed in main() */
unsigned long queue_length = 0;          /* [stats] not updated in this file -- TODO confirm still needed */
/* Level-synchronous (BSP-style) breadth-first search over the CSR graph.
 * Sweeps all vertices starting from start_node so that every connected
 * component is explored, timing each component.
 *
 * Returns the number of components discovered. NB: the current logic
 * returns 1 as soon as a component's time exceeds the running maximum,
 * which in practice happens for the first component; preserved as-is.
 *
 * Fixes vs. previous revision:
 *  - finished_flag was indexed with the component seed `i` instead of the
 *    thread index `j` in the convergence check (out-of-bounds read);
 *  - omp_get_num_threads() was used in serial regions (always 1), so
 *    finished_flag was under-allocated and threads wrote out of bounds
 *    inside the parallel region; omp_get_max_threads() sizes it correctly;
 *  - finished_flag was leaked on both exit paths;
 *  - malloc result was unchecked;
 *  - "%ld" corrected to "%lu" for the unsigned `visited` counter. */
static unsigned long bfs(csr_t *graph, unsigned long start_node) {
  unsigned long i;
  unsigned long components = 0;
  /* Size the per-thread completion flags for the largest possible team:
   * omp_get_num_threads() returns 1 outside a parallel region. */
  const int max_threads = omp_get_max_threads();
  char *finished_flag = malloc(sizeof(char) * max_threads);
  unsigned long time_comp, time_giant = 0, id_giant = start_node;
  if (finished_flag == NULL) {
    fprintf(stderr, "Unable to allocate per-thread finished flags\n");
    exit(-1);
  }
  i = start_node;
  do {
    int level = 0;
    vertex_position = i;
    if (metadata[i].touched == 0) {
      /* New component: seed it and start its clock. */
      CLOCK_START(time_comp);
      metadata[i].touched = 1;
      visited++;
      components++;
      //fprintf(stderr, "C %ld %d\n", i, omp_get_thread_num());
    } else {
      /* Already visited: advance to the next candidate seed (wrapping). */
      i++;
      if (i >= graph->vertex_cnt)
        i = 0;
      continue;
    }
    while (1) {
      unsigned long current_vertex;
      /* NOTE(review): level is stored into the char `touched` field below;
       * graphs with diameter > CHAR_MAX would overflow it -- TODO confirm. */
      level++;
      memset(finished_flag, 1, max_threads);
#pragma omp parallel for
      for (current_vertex = 0; current_vertex < graph->vertex_cnt;
          current_vertex++) {
        /* Expand only the current frontier (vertices discovered at `level`). */
        if (metadata[current_vertex].touched != level)
          continue;
        csr_edge_iterator_t iter;
        csr_init_edge_iterator(graph, current_vertex, &iter);
        while (csr_iter_step(graph, &iter) == 0) {
          if (!iter.incoming) {
            unsigned long target = iter.neighbour;
#pragma omp critical (atomicset)
            {
              if (metadata[target].touched == 0) {
                metadata[target].touched = level + 1;
                /* Any discovery means this level is not the last one. */
                finished_flag[omp_get_thread_num()] = 0;
                visited++;
                //fprintf(stderr, "V %ld %d\n", target, omp_get_thread_num());
              }
            }
          }
        }
      }
      /* Converged when no thread discovered a new vertex this level. */
      int j;
      for (j = 0; j < max_threads; j++) {
        if (finished_flag[j] == 0) {
          break;
        }
      }
      if (j == max_threads) break;
    }
    CLOCK_STOP(time_comp);
    //fprintf(stderr, "%ld\n", time_comp);
    if (time_comp > time_giant) {
      time_giant = time_comp;
      id_giant = i;
      printf("Visited %lu\n", visited);
      free(finished_flag);
      return 1;
    }
    i++;
    if (i >= graph->vertex_cnt) {
      i = 0;
    }
  } while (i != start_node);
  free(finished_flag);
  // fprintf(stderr, "%ld %ld\n", visited, graph->vertex_cnt);
  assert(visited == graph->vertex_cnt);
  printf("TIME GIANT COMP %lu\n", time_giant);
  printf("ID GIANT COMP %lu\n", id_giant);
  return components;
}
/* Driver: open the CSR graph named by argv[1], run BFS from vertex
 * argv[2], and print component count, timings, and resource usage.
 * When built with -DPREFETCHER, a prefetch thread is launched around the
 * BFS and tuned via the CMAX/CMIN environment variables. */
int main(int argc, char **argv) {
  unsigned long time_bfs, time_total, components;
  CLOCK_START(time_total);
  if (argc < 3) {
    fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]);
    exit(-1);
  }
#ifdef PREFETCHER
  /* Look-ahead bounds are tunable from the environment. */
  char *env_var;
  env_var = getenv("CMAX");
  if(env_var != NULL) {
    MAX_CACHE = atol(env_var);
  }
  env_var = getenv("CMIN");
  if(env_var != NULL) {
    MIN_CACHE = atol(env_var);
  }
  bind_master();
  init_prefetcher(prefetcher_random_callback,
      NULL);
  //		prefetcher_sequential_callback);
#endif
  graph = open_csr(argv[1]);
  /* One metadata entry per vertex, anonymous-mmap'ed then zeroed below. */
  metadata = (bfs_metadata_t*) map_anon_memory(
      graph->vertex_cnt * sizeof(bfs_metadata_t), "vertex metadata");
  //balloon_inflate(); /* Simulate semi-em conditions */
  print_mlocked_memory();
  unsigned long root_id = atol(argv[2]);
  assert(root_id < graph->vertex_cnt);
  /* Perhaps mmap /dev/null instead ? */
  memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t));
#ifdef PREFETCHER
  launch_prefetch_thread(graph->fd_calist);
#endif
  /* Time and account only the BFS itself. */
  struct rusage ru_begin;
  getrusage(RUSAGE_SELF, &ru_begin);
  CLOCK_START(time_bfs);
  components = bfs(graph, root_id);
  CLOCK_STOP(time_bfs);
  struct rusage ru_end;
  getrusage(RUSAGE_SELF, &ru_end);
#ifdef PREFETCHER
  terminate_prefetch_thread();
  destroy_prefetcher();
#endif
  munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t));
  close_csr(graph);
  CLOCK_STOP(time_total);
  printf("COMPONENTS %lu\n", components);
  printf("TIME BFS %lu\n", time_bfs);
  printf("TIME TOTAL %lu\n", time_total);
  print_rusage_stats(stdout, &ru_begin, &ru_end);
  printf("F_THRESHOLD %f\n",
      ((double) queue_above_threshold) / total_queue_demands);
  return 0;
}
|
test.c | #include <stdio.h>
#include "../utilities/check.h"
#define N 100
/* Offload test: a device simd loop with a safelen(2)-legal dependence
 * (a[k] = a[k-2] + 2) must reproduce the identity sequence a[k] = k. */
int main()
{
  check_offloading();
  int device_result[N], expected[N];
  int error = 0;
  // seed both arrays with a sentinel so untouched slots are detectable
  for (int idx = 0; idx < N; idx++)
    expected[idx] = device_result[idx] = -1;
  // compute on the device
  #pragma omp target map(tofrom: device_result[0:100])
  {
    int k;
    #pragma omp simd safelen(2)
    for(k=0; k<100; k++) {
      if (k > 1)
        device_result[k] = device_result[k-2] + 2;
      else
        device_result[k] = k;
    }
  }
  // reference result on the host
  for (int idx = 0; idx < N; idx++)
    expected[idx] = idx;
  // compare, bailing out after more than 10 mismatches
  for (int idx = 0; idx < N; idx++) {
    if (device_result[idx] != expected[idx])
      printf("%d: a %d != %d (error %d)\n", idx, device_result[idx], expected[idx], ++error);
    if (error > 10) {
      printf("abort\n");
      return 0;
    }
  }
  // report
  printf("done with %d errors\n", error);
  return error;
}
|
weightedNorm2.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C" void FUNC(weightedNorm2)(const dlong & Nblocks, const dlong & N,
                                    const dfloat * __restrict__ cpu_w,
                                    const dfloat * __restrict__ cpu_a,
                                    dfloat * __restrict__ cpu_wa){
  // Weighted squared L2 norm: cpu_wa[0] = sum_i w[i] * a[i]^2.
  // Nblocks is part of the kernel ABI and unused on the CPU path.
  dfloat acc = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for reduction(+:acc)
#endif
  for(int n = 0; n < N; ++n)
    acc += cpu_a[n] * cpu_a[n] * cpu_w[n]; // keep (a*a)*w order to match prior rounding
  cpu_wa[0] = acc;
}
|
DRB100-task-reference-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Cover the implicitly determined rule: In an orphaned task generating construct,
* formal arguments passed by reference are firstprivate.
* This requires OpenMP 4.5 to work.
* Earlier OpenMP does not allow a reference type for a variable within firstprivate().
* */
#include <stdio.h>
#define MYLEN 100
int a[MYLEN];
/* Worker called from the parallel loop in main(); writes one distinct
 * element of the global array per index, so concurrent calls do not race. */
void gen_task(int i)
{
  a[i]= i+1;
}
/* Fill a[] in parallel via gen_task, then verify a[i] == i+1 on the host. */
int main()
{
  #pragma omp parallel
  {
    /* Loop index declared in the for statement is implicitly private,
       matching the original private(i) clause. */
    #pragma omp for
    for (int idx = 0; idx < MYLEN; idx++)
    {
      gen_task(idx);
    }
  }
  /* correctness checking */
  for (int idx = 0; idx < MYLEN; idx++)
  {
    //assert (a[idx]==idx+1);
    if (a[idx] != idx + 1)
    {
      printf("warning: a[%d] = %d, not expected %d\n", idx, a[idx], idx+1);
    }
  }
  return 0;
}
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // USE_MKL && _OPENMP
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
float p;
int mode;
DMLC_DECLARE_PARAMETER(DropoutParam) {
DMLC_DECLARE_FIELD(p).set_default(0.5)
.set_range(0, 1)
.describe("Fraction of the input that gets dropped out during training time.");
DMLC_DECLARE_FIELD(mode)
.add_enum("training", dropout::kTraining)
.add_enum("always", dropout::kAlways)
.set_default(dropout::kTraining)
.describe("Whether to only turn on dropout during training or to also turn on for inference.");
}
}; // struct DropoutParam
/*!
 * \brief Dropout operator: during training (or in "always" mode) zeroes each
 *        input element with probability p and scales survivors by 1/(1-p)
 *        (inverted dropout); otherwise forwards the input unchanged.
 *        An MKL fast path is used when compiled with USE_MKL + OpenMP and
 *        the element type is at least as wide as int.
 */
template<typename xpu, typename DType>
class DropoutOp {
#if defined(USE_MKL) && defined(_OPENMP)
  /*!
   * \brief Fill r[0..n) with Bernoulli(p) draws using MKL VSL.
   *        Work is split evenly across OpenMP threads; each thread creates
   *        its own stream seeded and skipped ahead by its offset so draws
   *        are independent across threads.
   */
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    const int seed = 17 + genImpl.rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed + my_offset);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }
  // MKL forward pass
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<cpu> *s, RandGenerator<cpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer
    // will be too small, so we can't use MKL in those cases
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
      DType *outptr = out.dptr_;
      DType *dataptr = data.dptr_;
      // The mask tensor's storage is reused as an int scratch buffer for the
      // Bernoulli draws (safe because sizeof(DType) >= sizeof(int) here).
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      BernoulliGenerate(*pgen, count, pkeep, maskptr);
      const float pk_1 = 1.0f / pkeep;  // inverted-dropout rescale factor
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;  // caller falls back to the generic kernel
  }
  // MKL backward pass: grad_in = grad_out * mask / pkeep
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<cpu> *s, const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
      DType *ingradptr = gdata.dptr_;
      const DType *outgradptr = grad.dptr_;
      // Mask storage holds the int draws written by the forward pass.
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      const float pk_1 = 1.0f / pkeep;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;  // caller falls back to the generic kernel
  }
#ifdef __CUDACC__
  // GPU never uses MKL
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<gpu> *s, RandGenerator<gpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<gpu> *s, const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // __CUDACC__
#else  // #if defined(USE_MKL) && defined(_OPENMP)
  // No-MKL builds: stubs that always defer to the generic kernel.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<xpu> *s, RandGenerator<xpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<xpu> *s, const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // #if defined(USE_MKL) && defined(_OPENMP)

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // mask is 0 or 1/pkeep, so multiplying implements inverted dropout
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };

  /*! \brief Cache the keep-probability and mode from the operator params. */
  void Init(const DropoutParam &param) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
  }

  void Forward(const OpContext &ctx,
               const std::vector<TBlob> &in_data,
               const std::vector<OpReqType> &req,
               const std::vector<TBlob> &out_data) {
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);  // training also emits the mask
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &out = out_data[dropout::kOut];
      if (ctx.is_train || this->mode_ == dropout::kAlways) {
        // Training path: try MKL first, else launch the generic RNG kernel.
        RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
        CHECK_NOTNULL(pgen);
        if (!MKLForward(s, pgen, this->pkeep_, in_data, out_data)) {
          const TBlob &mask = out_data[dropout::kMask];
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in_data[dropout::kData].dptr<DType>(),
                                        this->pkeep_);
        }
      } else {
        // Inference path: dropout is the identity.
        const TBlob& data = in_data[dropout::kData];
        if (req[dropout::kOut] == kWriteTo) {
          mxnet_op::copy(s, out, data);
        } else {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
              s, out.Size(), out.dptr<DType>(), data.dptr<DType>());
          });
        }
      }
    }
  }

  void Backward(const OpContext &ctx,
                const std::vector<TBlob> &out_grad,
                const std::vector<TBlob> &out_data,
                const std::vector<OpReqType> &req,
                const std::vector<TBlob> &in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    if (ctx.is_train || mode_ == dropout::kAlways) {
      // Training path: grad_in = grad_out * saved mask (MKL fast path first).
      if (!MKLBackward(s, this->pkeep_, in_grad, out_data, out_grad)) {
        const TBlob &gdata = in_grad[dropout::kData];
        const TBlob &grad = out_grad[dropout::kOut];
        const TBlob &mask = out_data[dropout::kMask];
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
      }
    } else {
      // Inference path: pass the gradient straight through.
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      if (req[dropout::kData] == kWriteTo) {
        mxnet_op::copy(s, gdata, grad);
      } else {
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
        });
      }
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
};  // class DropoutOp
/*!
 * \brief Forward compute entry point: dispatches on the input's real type,
 *        then initializes a thread-local DropoutOp from the parsed params
 *        and runs its forward pass.
 */
template<typename xpu>
void DropoutCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    // thread_local: one op instance per worker thread, re-Init'ed each call
    static thread_local DropoutOp<xpu, DType> op;
    op.Init(param);
    op.Forward(ctx, inputs, req, outputs);
  });
}
//template<typename xpu>
//void DropoutGradCompute(const nnvm::NodeAttrs& attrs,
// const OpContext& ctx,
// const std::vector<TBlob>& inputs,
// const std::vector<OpReqType>& req,
// const std::vector<TBlob>& outputs) {
// const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
// CHECK_EQ(inputs.size(), 2U);
// CHECK_EQ(outputs.size(), 1);
// CHECK_EQ(req.size(), 1);
// std::vector<TBlob> out_grads(2);
// std::vector<TBlob> out_data(2);
// out_grads[dropout::kOut] = inputs[0];
// out_data[dropout::kMask] = inputs[1];
//
// MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
// static thread_local DropoutOp<xpu, DType> op;
// op.Init(param);
// op.Backward(ctx, out_grads, out_data, req, outputs);
// });
//}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
zboxloop.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_utilities.h"
#include "HYPRE_struct_ls.h"
#include "HYPRE_krylov.h"
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
* Test driver to time new boxloops and compare to the old ones
*--------------------------------------------------------------------------*/
#define DEVICE_VAR
hypre_int
main( hypre_int argc,
char *argv[] )
{
HYPRE_Int arg_index;
HYPRE_Int print_usage;
HYPRE_Int nx, ny, nz;
HYPRE_Int P, Q, R;
HYPRE_Int time_index;
HYPRE_Int num_procs, myid;
HYPRE_Int dim;
HYPRE_Int rep, reps, fail, sum;
HYPRE_Int size;
hypre_Box *x1_data_box, *x2_data_box, *x3_data_box, *x4_data_box;
//HYPRE_Int xi1, xi2, xi3, xi4;
HYPRE_Int xi1;
HYPRE_Real *xp1, *xp2, *xp3, *xp4;
hypre_Index loop_size, start, unit_stride, index;
/*-----------------------------------------------------------
* Initialize some stuff
*-----------------------------------------------------------*/
/* Initialize MPI */
hypre_MPI_Init(&argc, &argv);
hypre_MPI_Comm_size(hypre_MPI_COMM_WORLD, &num_procs );
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );
/*-----------------------------------------------------------
* Set defaults
*-----------------------------------------------------------*/
dim = 3;
nx = 10;
ny = 10;
nz = 10;
P = num_procs;
Q = 1;
R = 1;
/*-----------------------------------------------------------
* Parse command line
*-----------------------------------------------------------*/
print_usage = 0;
arg_index = 1;
while (arg_index < argc)
{
if ( strcmp(argv[arg_index], "-n") == 0 )
{
arg_index++;
nx = atoi(argv[arg_index++]);
ny = atoi(argv[arg_index++]);
nz = atoi(argv[arg_index++]);
}
else if ( strcmp(argv[arg_index], "-P") == 0 )
{
arg_index++;
P = atoi(argv[arg_index++]);
Q = atoi(argv[arg_index++]);
R = atoi(argv[arg_index++]);
}
else if ( strcmp(argv[arg_index], "-d") == 0 )
{
arg_index++;
dim = atoi(argv[arg_index++]);
}
else if ( strcmp(argv[arg_index], "-help") == 0 )
{
print_usage = 1;
break;
}
else
{
arg_index++;
}
}
/*-----------------------------------------------------------
* Print usage info
*-----------------------------------------------------------*/
if ( (print_usage) && (myid == 0) )
{
hypre_printf("\n");
hypre_printf("Usage: %s [<options>]\n", argv[0]);
hypre_printf("\n");
hypre_printf(" -n <nx> <ny> <nz> : problem size per block\n");
hypre_printf(" -P <Px> <Py> <Pz> : processor topology\n");
hypre_printf(" -d <dim> : problem dimension (2 or 3)\n");
hypre_printf("\n");
}
if ( print_usage )
{
exit(1);
}
/*-----------------------------------------------------------
* Check a few things
*-----------------------------------------------------------*/
if ((P*Q*R) > num_procs)
{
if (myid == 0)
{
hypre_printf("Error: PxQxR is more than the number of processors\n");
}
exit(1);
}
else if ((P*Q*R) < num_procs)
{
if (myid == 0)
{
hypre_printf("Warning: PxQxR is less than the number of processors\n");
}
}
/*-----------------------------------------------------------
* Initialize some stuff
*-----------------------------------------------------------*/
hypre_SetIndex3(start, 1, 1, 1);
hypre_SetIndex3(loop_size, nx, ny, nz);
hypre_SetIndex3(unit_stride, 1, 1, 1);
x1_data_box = hypre_BoxCreate(dim);
x2_data_box = hypre_BoxCreate(dim);
x3_data_box = hypre_BoxCreate(dim);
x4_data_box = hypre_BoxCreate(dim);
hypre_SetIndex3(hypre_BoxIMin(x1_data_box), 0, 0, 0);
hypre_SetIndex3(hypre_BoxIMax(x1_data_box), nx+1, ny+1, nz+1);
hypre_CopyBox(x1_data_box, x2_data_box);
hypre_CopyBox(x1_data_box, x3_data_box);
hypre_CopyBox(x1_data_box, x4_data_box);
size = (nx+2)*(ny+2)*(nz+2);
xp1 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
xp2 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
xp3 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
xp4 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
reps = 1000000000/(nx*ny*nz+1000);
/*-----------------------------------------------------------
* Print driver parameters
*-----------------------------------------------------------*/
if (myid == 0)
{
hypre_printf("Running with these driver parameters:\n");
hypre_printf(" (nx, ny, nz) = (%d, %d, %d)\n", nx, ny, nz);
hypre_printf(" (Px, Py, Pz) = (%d, %d, %d)\n", P, Q, R);
hypre_printf(" dim = %d\n", dim);
hypre_printf(" reps = %d\n", reps);
}
/*-----------------------------------------------------------
* Check new boxloops
*-----------------------------------------------------------*/
/* xp1 is already initialized to 0 */
zypre_BoxLoop1Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1);
zypre_BoxLoop1For(xi1)
{
xp1[xi1] ++;
}
zypre_BoxLoop1End(xi1);
/* Use old boxloop to check that values are set to 1 */
fail = 0;
sum = 0;
hypre_SerialBoxLoop1Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1);
{
sum += xp1[xi1];
if (xp1[xi1] != 1)
{
hypre_BoxLoopGetIndex(index);
hypre_printf("*(%d,%d,%d) = %d\n",
index[0], index[1], index[2], (HYPRE_Int) xp1[xi1]);
fail = 1;
}
}
hypre_SerialBoxLoop1End(xi1);
if (sum != (nx*ny*nz))
{
hypre_printf("*sum = %d\n", sum);
fail = 1;
}
if (fail)
{
exit(1);
}
/*-----------------------------------------------------------
* Synchronize so that timings make sense
*-----------------------------------------------------------*/
hypre_MPI_Barrier(hypre_MPI_COMM_WORLD);
/*-----------------------------------------------------------
* Time old boxloops
*-----------------------------------------------------------*/
/* Time BoxLoop0 */
time_index = hypre_InitializeTiming("BoxLoop0");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
xi1 = 0;
hypre_BoxLoop0Begin(3, loop_size);
{
xp1[xi1] += xp1[xi1];
//xi1++;
}
hypre_BoxLoop0End();
}
hypre_EndTiming(time_index);
/* Time BoxLoop1 */
time_index = hypre_InitializeTiming("BoxLoop1");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop1Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1);
{
xp1[xi1] += xp1[xi1];
}
hypre_BoxLoop1End(xi1);
}
hypre_EndTiming(time_index);
/* Time BoxLoop2 */
time_index = hypre_InitializeTiming("BoxLoop2");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop2Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2);
{
xp1[xi1] += xp1[xi1] + xp2[xi2];
}
hypre_BoxLoop2End(xi1, xi2);
}
hypre_EndTiming(time_index);
/* Time BoxLoop3 */
time_index = hypre_InitializeTiming("BoxLoop3");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop3Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3);
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
}
hypre_BoxLoop3End(xi1, xi2, xi3);
}
hypre_EndTiming(time_index);
/* Time BoxLoop4 */
time_index = hypre_InitializeTiming("BoxLoop4");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop4Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3,
x4_data_box, start, unit_stride, xi4);
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
}
hypre_BoxLoop4End(xi1, xi2, xi3, xi4);
}
hypre_EndTiming(time_index);
hypre_PrintTiming("Old BoxLoop times", hypre_MPI_COMM_WORLD);
hypre_FinalizeTiming(time_index);
hypre_ClearTiming();
/*-----------------------------------------------------------
* Time new boxloops
*-----------------------------------------------------------*/
/* Time BoxLoop0 */
time_index = hypre_InitializeTiming("BoxLoop0");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
xi1 = 0;
zypre_BoxLoop0Begin(dim, loop_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) firstprivate(xi1) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop0For()
{
xp1[xi1] += xp1[xi1];
xi1++;
}
zypre_BoxLoop0End();
}
hypre_EndTiming(time_index);
/* Time BoxLoop1 */
time_index = hypre_InitializeTiming("BoxLoop1");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop1Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop1For(xi1)
{
xp1[xi1] += xp1[xi1];
}
zypre_BoxLoop1End(xi1);
}
hypre_EndTiming(time_index);
/* Time BoxLoop2 */
time_index = hypre_InitializeTiming("BoxLoop2");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop2Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop2For(xi1, xi2)
{
xp1[xi1] += xp1[xi1] + xp2[xi2];
}
zypre_BoxLoop2End(xi1, xi2);
}
hypre_EndTiming(time_index);
/* Time BoxLoop3 */
time_index = hypre_InitializeTiming("BoxLoop3");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop3Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop3For(xi1, xi2, xi3)
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
}
zypre_BoxLoop3End(xi1, xi2, xi3);
}
hypre_EndTiming(time_index);
/* Time BoxLoop4 */
time_index = hypre_InitializeTiming("BoxLoop4");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop4Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3,
x4_data_box, start, unit_stride, xi4);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop4For(xi1, xi2, xi3, xi4)
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
}
zypre_BoxLoop4End(xi1, xi2, xi3, xi4);
}
hypre_EndTiming(time_index);
hypre_PrintTiming("New BoxLoop times", hypre_MPI_COMM_WORLD);
hypre_FinalizeTiming(time_index);
hypre_ClearTiming();
/*-----------------------------------------------------------
* Finalize things
*-----------------------------------------------------------*/
hypre_BoxDestroy(x1_data_box);
hypre_BoxDestroy(x2_data_box);
hypre_BoxDestroy(x3_data_box);
hypre_BoxDestroy(x4_data_box);
hypre_TFree(xp1, HYPRE_MEMORY_HOST);
hypre_TFree(xp2, HYPRE_MEMORY_HOST);
hypre_TFree(xp3, HYPRE_MEMORY_HOST);
hypre_TFree(xp4, HYPRE_MEMORY_HOST);
/* Finalize MPI */
hypre_MPI_Finalize();
return (0);
}
|
spmv.h | #pragma once
#include <algorithm>

#include <thrust/functional.h>
#include <cusp/detail/functional.h>
//MW: add some OpenMP pragmas
namespace cusp
{
namespace detail
{
namespace host
{
//////////////
// COO SpMV //
//////////////
template <typename Matrix,
typename Vector1,
typename Vector2,
typename UnaryFunction,
typename BinaryFunction1,
typename BinaryFunction2>
void spmv_coo(const Matrix& A,
const Vector1& x,
Vector2& y,
UnaryFunction initialize,
BinaryFunction1 combine,
BinaryFunction2 reduce)
{
typedef typename Matrix::index_type IndexType;
typedef typename Vector2::value_type ValueType;
for(size_t i = 0; i < A.num_rows; i++)
y[i] = initialize(y[i]);
for(size_t n = 0; n < A.num_entries; n++)
{
const IndexType& i = A.row_indices[n];
const IndexType& j = A.column_indices[n];
const ValueType& Aij = A.values[n];
const ValueType& xj = x[j];
y[i] = reduce(y[i], combine(Aij, xj));
}
}
template <typename Matrix,
typename Vector1,
typename Vector2>
void spmv_coo(const Matrix& A,
const Vector1& x,
Vector2& y)
{
typedef typename Vector2::value_type ValueType;
spmv_coo(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
}
//////////////
// CSR SpMV //
//////////////
// Sparse matrix-vector product for a CSR matrix.  Each output row is
// computed from its own contiguous slice of values/column_indices, so the
// outer loop is embarrassingly parallel (hence the OpenMP pragma).
template <typename Matrix,
          typename Vector1,
          typename Vector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmv_csr(const Matrix& A,
              const Vector1& x,
              Vector2& y,
              UnaryFunction initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    typedef typename Matrix::index_type IndexType;
    typedef typename Vector2::value_type ValueType;

    #pragma omp parallel for
    for (size_t i = 0; i < A.num_rows; i++)
    {
        // Accumulate this row into a local, then store once at the end.
        ValueType sum = initialize(y[i]);

        const IndexType begin = A.row_offsets[i];
        const IndexType end   = A.row_offsets[i + 1];

        for (IndexType jj = begin; jj < end; jj++)
            sum = reduce(sum, combine(A.values[jj], x[A.column_indices[jj]]));

        y[i] = sum;
    }
}
template <typename Matrix,
typename Vector1,
typename Vector2>
void spmv_csr(const Matrix& A,
const Vector1& x,
Vector2& y)
{
typedef typename Vector2::value_type ValueType;
spmv_csr(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
}
//////////////
// DIA SpMV //
//////////////
template <typename Matrix,
typename Vector1,
typename Vector2,
typename UnaryFunction,
typename BinaryFunction1,
typename BinaryFunction2>
void spmv_dia(const Matrix& A,
const Vector1& x,
Vector2& y,
UnaryFunction initialize,
BinaryFunction1 combine,
BinaryFunction2 reduce)
{
typedef typename Matrix::index_type IndexType;
typedef typename Vector2::value_type ValueType;
const size_t num_diagonals = A.values.num_cols;
for(size_t i = 0; i < A.num_rows; i++)
y[i] = initialize(y[i]);
for(size_t i = 0; i < num_diagonals; i++)
{
const IndexType& k = A.diagonal_offsets[i];
const IndexType& i_start = std::max<IndexType>(0, -k);
const IndexType& j_start = std::max<IndexType>(0, k);
// number of elements to process in this diagonal
const IndexType N = std::min(A.num_rows - i_start, A.num_cols - j_start);
for(IndexType n = 0; n < N; n++)
{
const ValueType& Aij = A.values(i_start + n, i);
const ValueType& xj = x[j_start + n];
ValueType& yi = y[i_start + n];
yi = reduce(yi, combine(Aij, xj));
}
}
}
template <typename Matrix,
typename Vector1,
typename Vector2>
void spmv_dia(const Matrix& A,
const Vector1& x,
Vector2& y)
{
typedef typename Vector2::value_type ValueType;
spmv_dia(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
}
//////////////
// ELL SpMV //
//////////////
template <typename Matrix,
typename Vector1,
typename Vector2,
typename UnaryFunction,
typename BinaryFunction1,
typename BinaryFunction2>
void spmv_ell(const Matrix& A,
const Vector1& x,
Vector2& y,
UnaryFunction initialize,
BinaryFunction1 combine,
BinaryFunction2 reduce)
{
typedef typename Matrix::index_type IndexType;
typedef typename Vector2::value_type ValueType;
const size_t& num_entries_per_row = A.column_indices.num_cols;
const IndexType invalid_index = Matrix::invalid_index;
for(size_t i = 0; i < A.num_rows; i++)
y[i] = initialize(y[i]);
for(size_t n = 0; n < num_entries_per_row; n++)
{
for(size_t i = 0; i < A.num_rows; i++)
{
const IndexType& j = A.column_indices(i, n);
const ValueType& Aij = A.values(i,n);
if (j != invalid_index)
{
const ValueType& xj = x[j];
y[i] = reduce(y[i], combine(Aij, xj));
}
}
}
}
template <typename Matrix,
typename Vector1,
typename Vector2>
void spmv_ell(const Matrix& A,
const Vector1& x,
Vector2& y)
{
typedef typename Vector2::value_type ValueType;
spmv_ell(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
}
} // end namespace host
} // end namespace detail
} // end namespace cusp
|
memdbg.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
/*
* memdbg.c
* Memory management debugging (at runtime)
*
* memdbg.c contains routines detect, and report memory
* problems, such as double frees, passing bad pointers to
* free, most buffer overwrites. Also, tracking of non-freed
* data, showing memory leaks, can also be shown.
*
* Compilation Options (provided from Makefile CFLAGS)
*
* MEMDBG_ON If this is NOT defined, then memdbg will
* get out of your way, and most normal memory functions
* will be called with no overhead at all.
*
* MEMDBG_EXTRA_CHECKS If defined, then we do not 'really' free
* the memory. We simply set the fence posts to deleted status,
* and proceed. This allows us finding double frees, and other
* usages of smashes. NOTE, when this is set, and there are a
* LOT of memory alloc/frees, then at some point the calls to
* free will fail. If this happens, there is code in place that
* frees the oldest freed block (really frees it), and does that
* over and over again, until either we have no freed blocks left
* OR the app is able to allocate this new buffer. In this situation
* we do lose track of those older freed blocks of memory, but it
* allows the application to continue forward, even though this
 *  debugging code exhausted all memory.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#define __MEMDBG__
#include "memdbg.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * MEMDBG_libc_free - straight pass-through to the C library's free().
 *
 * This function must ALWAYS exist.  When library code allocates memory
 * outside of this wrapper layer but that memory still has to be released,
 * the caller needs direct access to libc free().  Calling free() in client
 * code would be intercepted by the memdbg macros and rewritten into
 * MEMDBG_free(p, filename, fileline), so this un-wrapped entry point is
 * provided instead.
 */
void MEMDBG_libc_free(void *p) {
	free(p);
}
/* Un-wrapped malloc(): allocate 'size' bytes straight from libc, bypassing
 * the memdbg accounting.  Pair with MEMDBG_libc_free(). */
void *MEMDBG_libc_alloc(size_t size) {
	void *p = malloc(size);
	return p;
}
/* Un-wrapped calloc(): allocate 'size' zero-filled bytes straight from
 * libc, bypassing the memdbg accounting.  Pair with MEMDBG_libc_free(). */
void *MEMDBG_libc_calloc(size_t size) {
	return calloc(size, 1);
}
#if defined (MEMDBG_ON)
/*
* these fence posts (first fence post guarding underflow), are:
* MEMFPOST == allocated memory
* MEMFPOSTt == allocated 'tiny' memory (allocated with mem_alloc_tiny() from memory.c)
* MEMFPOSTd == freed (deleted) memory. Will only be set this way, and stored in the
* freed_memlist, if MEMDBG_EXTRA_CHECKS is set.
*/
#define MEMFPOST 0xa5a5a5a5
#define MEMFPOSTt 0xa555a5a5
#define MEMFPOSTd 0x5a5a5a5a
const char *cpMEMFPOST = "\xa5\xa5\xa5\xa5";
const char *cpMEMFPOSTd = "\x5a\x5a\x5a\x5a";
/*
 * Tail fence post.  An instance of this structure is placed immediately
 * after the last byte of the client's allocation, so any buffer overwrite
 * smashes mdbg_fpst and can be detected by comparing against cpMEMFPOST /
 * cpMEMFPOSTd.
 */
typedef struct _hdr2 {
	/* we use a unsigned char, and do not care about alignment. We ALWAYS treat this var with
	 * a memcpy, memcmp, etc, so that this works the same on aligned required CPU or non-aligned required.
	 */
	unsigned char mdbg_fpst[4];
} MEMDBG_HDR2;
/*
 * This structure is carefully crafted. It contains exactly
 * 4 32 bit values (possibly 6 on non-aligned hw), and 4 pointers
 * In the end, we should have alignment as good as we would
 * get from the allocator. This puts our mdbg_fpst2 RIGHT against
 * the start of the allocated block. We later will put the
 * HDR2 RIGHT against the tail end of the buffer. This will
 * allow us to catch a single byte over or underflow.
 */
typedef struct _hdr {
	struct _hdr *mdbg_next;        /* doubly linked list: next header in memlist (or freed_memlist) */
	struct _hdr *mdbg_prev;        /* doubly linked list: previous header; NULL at the list head */
	MEMDBG_HDR2 *mdbg_hdr2; /* points to just 'right' after allocated memory, for overflow catching */
	const char *mdbg_file;         /* __FILE__ of the allocating call site */
	ARCH_WORD_32 mdbg_line;        /* __LINE__ of the allocating call site */
	ARCH_WORD_32 mdbg_cnt;         /* monotonically increasing allocation number (alloc_cnt) */
	size_t mdbg_size;              /* size of the CLIENT region (excludes header and tail fence) */
	ARCH_WORD_32 mdbg_fpst; /* this should be 'right' against the allocated memory, for underflow catching */
} MEMDBG_HDR;
static size_t mem_size = 0;
static size_t max_mem_size = 0;
static size_t mem_sizet = 0;
static size_t max_mem_sizet = 0;
static MEMDBG_HDR *memlist = NULL;
static unsigned long alloc_cnt = 0;
#ifdef MEMDBG_EXTRA_CHECKS
static MEMDBG_HDR *freed_memlist = NULL;
static size_t freed_mem_size = 0;
static unsigned long freed_cnt = 0;
#endif
#define RESERVE_SZ (sizeof(MEMDBG_HDR))
#define CLIENT_2_HDR(a) ((MEMDBG_HDR *) (((char *) (a)) - RESERVE_SZ))
#define HDR_2_CLIENT(a) ((void *) (((char *) (a)) + RESERVE_SZ))
static void mem_fence_post_err_fp (void *, const char *, int, char *fp, int line);
static void mem_fence_post_err_ne_fp (void *, const char *, int, char *fp, int line);
static void mem_fence_post_errd_fp (void *, const char *, int, char *fp, int line);
static void mem_fence_post_errd_ne_fp(void *, const char *, int, char *fp, int line);
#define mem_fence_post_err(a,b,c) mem_fence_post_err_fp(a,b,c,__FILE__,__LINE__)
#define mem_fence_post_err_ne(a,b,c) mem_fence_post_err_ne_fp(a,b,c,__FILE__,__LINE__)
#define mem_fence_post_errd(a,b,c) mem_fence_post_errd_fp(a,b,c,__FILE__,__LINE__)
#define mem_fence_post_errd_ne(a,b,c) mem_fence_post_errd_ne_fp(a,b,c,__FILE__,__LINE__)
#ifdef MEMDBG_EXTRA_CHECKS
/* NOTE, when this function is called, the memory (client memory) gets SMASHED   */
/* If this starts causing the program to crash, then it is likely that the client */
/* code is using dangling pointers by accessing the memory after a free or realloc */
static void MEMDBG_FREEDLIST_add(MEMDBG_HDR *);
#endif
/*
 * these are now macros. This makes it easier for doing omp critical
 * sections. It is illegal to branch into or out of a CRITICAL block
 * (a function call with an early return inside would do exactly that),
 * so the list maintenance is expanded inline where needed.
 */
/* Unlink header p from the live 'memlist' doubly linked list; updates the
 * list head when p is the first element. Must be called inside the
 * (memdbg_crit) critical section when built with OpenMP. */
#define MEMDBG_LIST_delete(p) \
	if (p->mdbg_next != NULL) \
		p->mdbg_next->mdbg_prev = p->mdbg_prev; \
	if (p->mdbg_prev != NULL) \
		p->mdbg_prev->mdbg_next = p->mdbg_next; \
	else \
		memlist = p->mdbg_next
/* Push header p onto the front of 'memlist'. Same locking requirement as
 * MEMDBG_LIST_delete(). */
#define MEMDBG_LIST_add(p) \
	p->mdbg_next = memlist; \
	p->mdbg_prev = NULL; \
	if (memlist != NULL) \
		memlist->mdbg_prev = p; \
	memlist = p
/*
 * Report how many bytes are currently outstanding.  Client code may call
 * this directly; a non-zero result at program exit indicates leaks.
 * With show_freed non-zero (and MEMDBG_EXTRA_CHECKS compiled in) the byte
 * count of the retained freed blocks is returned instead of the sum of the
 * live 'normal' and 'tiny' allocations.
 */
size_t MemDbg_Used(int show_freed) {
#ifdef MEMDBG_EXTRA_CHECKS
	if (show_freed)
		return freed_mem_size;
#endif
	return mem_sizet + mem_size;
}
/*
 * This function can be called directly by client code.
 * It writes out all non-freed memory.
 */
void MemDbg_Display(FILE *fp) {
	MEMDBG_HDR *p;
	int idx;

	/* nothing outstanding: stay silent */
	if (!(mem_size+mem_sizet)) return;
	fprintf(fp, "\n------------------------------\n");
	fprintf(fp, "MEMDBG: allocation information (display):\n");
	fprintf(fp, "   current normal alloc mem (leaks)%llu  max normal mem allocated: %llu\n", (unsigned long long)mem_size, (unsigned long long)max_mem_size);
	fprintf(fp, "   current 'tiny' alloc mem (leaks)%llu  max tiny mem allocated: %llu\n", (unsigned long long)mem_sizet, (unsigned long long)max_mem_sizet);
#ifdef MEMDBG_EXTRA_CHECKS
	fprintf(fp, "  Freed mem size: %llu (freed cnt: %lu)", (unsigned long long)freed_mem_size, freed_cnt);
#endif
	fprintf(fp, "\n");
	fprintf(fp, "Index : alloc# : Size : File(Line)  [first 20 bytes, or size of bytes]\n");
	idx = 0;
	/* walk every live allocation, one line per block */
	p = memlist;
	while (p != NULL) {
		int bfreed = 0, bbad=0;
		fprintf(fp, "%-5d : %-6d : %6llu : %s(%u)", idx++, p->mdbg_cnt, (unsigned long long)p->mdbg_size, p->mdbg_file, p->mdbg_line);
		/* head fence check: anything but the live fence values is trouble */
		if (p->mdbg_fpst != MEMFPOST && p->mdbg_fpst != MEMFPOSTt) {
			bbad=1;
			if (p->mdbg_fpst == MEMFPOSTd) {
				fprintf(fp, " INVALID ( freed already? )");
				bfreed = 1;
			}
			else
				fprintf(fp, " INVALID ( buffer underflow )");
		}
		/* tail fence check: detects overflow past the client region */
		if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOST, 4)) {
			if (bfreed && !memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4)) {
				bbad=1;
				fprintf(fp, " YES Data was freed.");
			}
			else {
				unsigned i;
				char *cp = ((char*)p)+RESERVE_SZ;
				fprintf(fp, " INVALID (buffer overflow) tail of block: ");
				/* NOTE: cp is immediately repointed at the 16 bytes just
				 * before the tail fence so the smashed area is printed */
				cp = (char*)p->mdbg_hdr2->mdbg_fpst;
				cp -= 16;
				for (i = 0; i < 20; ++i) {
					/* print non-ASCII bytes as '.' */
					if(*cp < ' ' || *cp > '~')
						fprintf(fp, ".");
					else
						fprintf(fp, "%c", *cp);
					++cp;
				}
				fprintf(fp, "  and the head of the block was:  ");
			}
		}
		if (!bbad) {
			/* healthy block: show up to the first 20 client bytes */
			unsigned i;
			char *cp = ((char*)p)+RESERVE_SZ;
			fprintf(fp, "  ");
			for (i = 0; i < 20 && i < p->mdbg_size; ++i) {
				if(*cp < ' ' || *cp > '~')
					fprintf(fp, ".");
				else
					fprintf(fp, "%c", *cp);
				++cp;
			}
		}
		fprintf(fp, "\n");
		p = p->mdbg_next;
	}
}
/*
 * This function can be called directly by client code.
 * It will walk the list of memory, 'looking' for errors.
 * Both entry points are thin wrappers over MemDbg_Validate_msg2():
 * no message banner, and extra progress messages suppressed.
 */
void MemDbg_Validate(int level) {
	MemDbg_Validate_msg2(level, NULL, 0);
}
/* Same as MemDbg_Validate(), but prints pMsg first (quiet mode). */
void MemDbg_Validate_msg(int level, const char *pMsg) {
	MemDbg_Validate_msg2(level, pMsg, 0);
}
/*
 * Full validation entry point.  Walks the live list (level 0) and, when
 * MEMDBG_EXTRA_CHECKS is compiled in, the retained freed list at three
 * increasing depths (levels 1..3 selected via MEMDBG_VALIDATE_*).
 * Exits the process with status 1 on the first batch of detected errors.
 * pMsg, if non-NULL, is printed first; bShowExMessages enables progress
 * banners on stderr.
 */
void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExMessages) {
	/* Level 0 we ALWAYS walk the alloc list, looking for over/underwrite, and validate a few other items. */
	MEMDBG_HDR  *p = memlist;
	int error = 0;
	int cnt=0;
#ifdef MEMDBG_EXTRA_CHECKS
	unsigned char *cp;
	unsigned i;
#endif
	if (bShowExMessages) {
		if (pMsg)
			fprintf(stderr, "%s\n", pMsg);
		fprintf(stderr, "MemDbg_Validate level 0 checking");
	}
	/* level 0: verify both fence posts of every live allocation */
	while (p) {
		if (p->mdbg_fpst != MEMFPOST && p->mdbg_fpst != MEMFPOSTt) {
			++cnt;
			if (cnt < 100) {
				if (p->mdbg_fpst == MEMFPOSTd)
					fprintf(stderr, "\nDeleted memory still in chain\n");
				else {
					fprintf(stderr, "\nMemory buffer underwrite found! Will try to list what file/line allocated the buffer\n");
					mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
				}
			}
			error = 1;
		}
		if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOST, 4)) {
			++cnt;
			if (cnt < 100) {
				/* a deleted head fence was already reported above */
				if (p->mdbg_fpst == MEMFPOSTd) {
				} else {
					fprintf(stderr, "\nMemory buffer overwrite found! Will try to list what file/line allocated the buffer\n");
					mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
				}
			}
			error = 1;
		}
		// Loop detect code
		{
			MEMDBG_HDR volatile *p2 = p->mdbg_next;
			while (p2) {
				if (p2 == p || p2 == p2->mdbg_next) {
					fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n");
					break;
				}
				p2 = p2->mdbg_next;
			}
		}
		if (cnt > 1000)
			break;
		p = p->mdbg_next;
	}
	if (error) {
		fprintf(stderr, "\nExiting due to the error detected\n");
		if (cnt > 100)
			fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt);
		exit(1);
	}
	if (bShowExMessages)
		fprintf(stderr, " Passed\n");
	if (level == MEMDBG_VALIDATE_MIN) return;

#ifdef MEMDBG_EXTRA_CHECKS
	// Ok, we have a list of all freed items. We will do work on this.
	/* level 1: verify the fence posts of every retained freed block */
	p = freed_memlist;
	if (!p) return;
	cnt = 0;
	if (bShowExMessages)
		fprintf(stderr, "MemDbg_Validate level 1 checking");
	while (p) {
		/* NOTE(review): unlike level 0, the fence-post report below is NOT
		 * limited by the cnt < 100 guard (no braces) — confirm intended */
		if (p->mdbg_fpst != MEMFPOSTd) {
			++cnt;
			if (cnt < 100)
				fprintf(stderr, "\nFreed Memory buffer underwrite found! Will try to list what file/line allocated the buffer\n");
			mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
			error = 1;
		}
		if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4)) {
			++cnt;
			if (cnt < 100)
				fprintf(stderr, "\nFreed Memory buffer overwrite found! Will try to list what file/line allocated the buffer\n");
			mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
			error = 1;
		}
		// Loop detect code
		{
			MEMDBG_HDR *p2 = p->mdbg_next;
			while (p2) {
				if (p2 == p || p2 == p2->mdbg_next) {
					fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n");
					break;
				}
				p2 = p2->mdbg_next;
			}
		}
		if (cnt > 1000)
			break;
		p = p->mdbg_next;
	}
	if (error) {
		fprintf(stderr, "\nExiting due to the error detected\n");
		if (cnt > 100)
			fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt);
		exit(1);
	}
	if (bShowExMessages)
		fprintf(stderr, " Passed\n");
	if (level == MEMDBG_VALIDATE_DEEP) return;

	/* level 2: freed client data was filled with 0xCD at free time;
	 * check the size bookkeeping and the first 8 data bytes */
	p = freed_memlist;
	cnt = 0;
	if (bShowExMessages)
		fprintf(stderr, "MemDbg_Validate level 2 checking");
	while (p) {
		cp = ((unsigned char*)p)+RESERVE_SZ;
		if (p->mdbg_size != p->mdbg_hdr2->mdbg_fpst - cp) {
			fprintf(stderr, "\nFreed Memory buffer underwrite found (size var busted)! Will try to list what file/line allocated the buffer\n");
			mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
			error = 1;
		} else {
			for (i = 0; i < p->mdbg_size; ++i) {
				// in 'deeper' mode, we only look at first 8 bytes.  If these are not overwritten, it is less likely that the buffer
				// has been written to.  It 'can' be written to later on, and if we use deepest, we will look at the FULL buffer.
				if (i == 8)
					break;
				if (*cp++ != 0xCD) {
					++cnt;
					if (cnt < 100)
						fprintf(stderr, "\nFreed Memory buffer modification found! Will try to list what file/line allocated the buffer\n");
					mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
					error = 1;
					break;
				}
			}
		}
		// Loop detect code
		{
			MEMDBG_HDR *p2 = p->mdbg_next;
			while (p2) {
				if (p2 == p || p2 == p2->mdbg_next) {
					fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n");
					break;
				}
				p2 = p2->mdbg_next;
			}
		}
		if (cnt > 1000)
			break;
		p = p->mdbg_next;
	}
	if (error) {
		fprintf(stderr, "\nExiting due to the error detected\n");
		if (cnt > 100)
			fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt);
		exit(1);
	}
	if (bShowExMessages)
		fprintf(stderr, " Passed\n");
	if (level == MEMDBG_VALIDATE_DEEPER) return;

	/* level 3: scan the remainder (bytes 8..size-1) of every freed buffer */
	p = freed_memlist;
	cnt = 0;
	if (bShowExMessages)
		fprintf(stderr, "MemDbg_Validate level 3 checking");
	while (p) {
		cp = ((unsigned char*)p)+RESERVE_SZ;
		// in this deepest mode, we look at the ENTIRE buffer.  In deeper, we looked at first 8, so here, we just start from 8 and look forward.
		for (i = 8; i < p->mdbg_size; ++i) {
			if (*cp++ != 0xCD) {
				++cnt;
				if (cnt < 100)
					fprintf(stderr, "\nFreed Memory buffer modification found! Will try to list what file/line allocated the buffer\n");
				mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line);
				error = 1;
				break;
			}
		}
		// Loop detect code
		{
			MEMDBG_HDR *p2 = p->mdbg_next;
			while (p2) {
				if (p2 == p || p2 == p2->mdbg_next) {
					fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n");
					break;
				}
				p2 = p2->mdbg_next;
			}
		}
		if (cnt > 1000)
			break;
		p = p->mdbg_next;
	}
	if (error) {
		fprintf(stderr, "\nExiting due to the error detected\n");
		if (cnt > 100)
			fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt);
		exit(1);
	}
	if (bShowExMessages)
		fprintf(stderr, " Passed\n");
#endif
}
#ifdef MEMDBG_EXTRA_CHECKS
/* Ok, if we are out of memory, due to keeping too much freed memory around, then free
 * up oldest blocks until we can malloc this block.  the rar format is a bad actor,
 * as could be many of the 'non-hash' (old zip for sure), as these have to decrypt
 * a full file, to be assured the password is correct.
 */
static void release_oldest_freed_block() {
	MEMDBG_HDR *p = freed_memlist, *pp;
	if (!p) return;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		/* the oldest freed block is at the TAIL of the freed list */
		p = freed_memlist;
		while (p->mdbg_next)
			p = p->mdbg_next;

		// now unlink it.
		freed_mem_size -= p->mdbg_size;
		--freed_cnt;
		/*
		 * Bug fix: when the tail is also the head (single-element list)
		 * p->mdbg_prev is NULL; the old unconditional
		 * 'p->mdbg_prev->mdbg_next = NULL' dereferenced a null pointer
		 * and never reset the list head.
		 */
		if (p->mdbg_prev)
			p->mdbg_prev->mdbg_next = NULL;
		else
			freed_memlist = NULL;
		pp = p->mdbg_prev;
	}
	// now free it
	free(p);
	if (freed_cnt > 10) {
		// free one more.
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
		{
			// NOTE, we can not be assured that pp was still pointing
			// to the last item in the list.  We have to look AGAIN,
			// within a critical section.
			pp = freed_memlist;
			while (pp->mdbg_next)
				pp = pp->mdbg_next;
#endif
			freed_mem_size -= pp->mdbg_size;
			--freed_cnt;
			/* same single-element guard as above */
			if (pp->mdbg_prev)
				pp->mdbg_prev->mdbg_next = NULL;
			else
				freed_memlist = NULL;
#ifdef _OPENMP
		}
#endif
		// now free it
		free(pp);
	}
}
#endif
/*
 * MEMDBG_calloc
 * Allocate a zero-filled block through MEMDBG_alloc(), so it carries the
 * same fence posts and list accounting.  Returns NULL on failure, like
 * calloc().  file/line record the client call site.
 */
void * MEMDBG_calloc(size_t size, char *file, int line)
{
	char *p;
	if ( ((signed long long)mem_size) < 0)
		fprintf(stderr, "MEMDBG_calloc %lld %s:%d  mem:%lld\n", (unsigned long long)size, file, line, (unsigned long long)mem_size);
	p = (char*)MEMDBG_alloc(size,file,line);
	/* Bug fix: MEMDBG_alloc() can return NULL; the old code passed that
	 * NULL straight to memset(), which is undefined behavior. */
	if (p != NULL)
		memset(p, 0, size);
	return p;
}
/*
 * MEMDBG_alloc
 * Allocate a memory block.  makes a protected call to malloc(), allocating
 * extra data, and adding data to all required structures.
 * Layout: [MEMDBG_HDR][client bytes...][4-byte tail fence].
 */
void * MEMDBG_alloc(size_t size, char *file, int line)
{
	MEMDBG_HDR      *p;
	if ( ((signed long long)mem_size) < 0)
		fprintf(stderr, "MEMDBG_alloc %lld %s:%d  mem:%lld\n", (unsigned long long)size, file, line, (unsigned long long)mem_size);
	p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4);
#ifdef MEMDBG_EXTRA_CHECKS
	/* If malloc failed and we are hoarding freed blocks, really release
	 * the oldest freed blocks one at a time and retry the allocation. */
#ifdef _OPENMP
	{
		int i = 0;
		do {
#pragma omp critical (memdbg_crit)
			{
				/* decide under the lock whether a retry can possibly help */
				if (!p && freed_mem_size > (RESERVE_SZ + size + 4) && !p && freed_cnt)
					i = 1;
			}
			if (i) {
				release_oldest_freed_block();
				p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4);
			}
		} while (i && !p);
	}
#else
	/* this is the 'right' block, but hard to do with the restrictions of no branching out that omp critical places on us */
	if (!p && freed_mem_size > (RESERVE_SZ + size + 4)) {
		while (!p && freed_cnt) {
			release_oldest_freed_block();
			p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4);
		}
	}
#endif
#endif
	if (!p) {
		if ( ((signed long long)mem_size) < 0)
			fprintf(stderr, "MEMDBG_alloc (end) %lld %s:%d  mem:%lld\n", (unsigned long long)size, file, line, (unsigned long long)mem_size);
		return NULL;
	}
	/* fill in the header and both fence posts */
	p->mdbg_fpst = MEMFPOST;
	p->mdbg_size = size;
	p->mdbg_file = file;
	p->mdbg_line = line;
	p->mdbg_hdr2 = (MEMDBG_HDR2*)(((char*)p)+RESERVE_SZ + size);
	memcpy(p->mdbg_hdr2, cpMEMFPOST, 4);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		/* accounting + list insertion must be atomic w.r.t. other threads */
		p->mdbg_cnt = ++alloc_cnt;
		mem_size += size;
		if (mem_size > max_mem_size)
			max_mem_size = mem_size;
		MEMDBG_LIST_add(p);
	}
	if ( ((signed long long)mem_size) < 0)
		fprintf(stderr, "MEMDBG_alloc (end) %lld %s:%d  mem:%lld\n", (unsigned long long)size, file, line, (unsigned long long)mem_size);
	return HDR_2_CLIENT(p);
}
/*
 * MEMDBG_realloc
 * Reallocate a memory block  makes a protected call to realloc(), allocating
 * extra data, and adding data to all required structures.
 * *** realloc is a NASTY function.  The code here has taken a few turns,
 * trying to handle all of the nuances of this function, and how we hook it,
 * and how we deal with trying to not free data (if in MEMDBG_EXTRA_CHECKS mode)
 */
void *
MEMDBG_realloc(const void *ptr, size_t size, char *file, int line)
{
	MEMDBG_HDR *p;
	int istiny = 0;
	int err = 0, i;

	if ( ((signed long long)mem_size) < 0)
		fprintf(stderr, "MEMDBG_realloc(%lld) %s:%d  mem:%lld\n", (unsigned long long)size, file, line, (unsigned long long)mem_size);
	/* if ptr is null, this function works just like alloc, so simply use alloc */
	if (!ptr)
		return MEMDBG_alloc(size, file, line);

	/*
	 * Validate both fence posts of the existing block.
	 * Bug fix: this critical section was unnamed, i.e. it used a DIFFERENT
	 * lock than every other '(memdbg_crit)' section in this file, so it did
	 * not actually exclude concurrent list/fence mutation.  Name it to match.
	 */
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		p = CLIENT_2_HDR(ptr);
		if (p->mdbg_fpst == MEMFPOSTt)
			istiny = 1;
		else if (p->mdbg_fpst != MEMFPOST)
			err = 1;
		else {
			/* head fence good; verify the tail fence byte by byte */
			for (i = 0; i < 4; ++i)
				if (((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOST[i]) {
					err = 1;
					break;
				}
		}
		if (err) {
			/* classify: 'deleted' fences mean the block was already freed */
			if (p->mdbg_fpst == MEMFPOSTd)
				err = 2;
			else {
				for (i = 0; i < 4; ++i)
					if (((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOSTd[i]) {
						break;
					}
				if (i < 4)
					err = 2;
			}
		}
	}
	if (err) {
		if (err == 2)
			mem_fence_post_errd(p, file, line);
		else
			mem_fence_post_err(p, file, line);
		return NULL;
	}
	/* if size == 0, this function works exactly like free, so just use free */
	if (!size) {
		/* NOTE, use ptr, and NOT p */
		MEMDBG_free(ptr, file, line);
		return NULL;
	}
	/* mark deleted and remove from the live list while we rework the block */
	p->mdbg_fpst = MEMFPOSTd;
	memcpy(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		if (istiny)
			mem_sizet -= p->mdbg_size;
		else
			mem_size -= p->mdbg_size;
		MEMDBG_LIST_delete(p);
	}
#ifdef MEMDBG_EXTRA_CHECKS
	if (size > p->mdbg_size) {
		/* growing: alloc-copy, then park the old block on the freed list */
		void *p2 = MEMDBG_alloc(size, file, line);
		if (p2) {
			if (istiny)
				MEMDBG_tag_mem_from_alloc_tiny(p); /* NOTE(review): tagging the OLD header here (not p2) mirrors the other call sites, but looks suspicious after the data moved — verify */
			memcpy(p2, ((char*)p)+RESERVE_SZ, p->mdbg_size);
			/* we had to keep the original data 'clean' until now.
			 * but Now, we can put it on free list (which smashes
			 * the original memory block
			 */
			MEMDBG_FREEDLIST_add(p);
			return p2;
		}
		/* We have to undo the MEMDBG_LIST_delete(p);  because realloc should
		 * leave the ORIGINAL buffer alone, if we can not allocate more
		 * memory.  Thus we need to 'leave' the leak alone.  This is a leak
		 * unless the client code frees the original pointer.  'undoing' the
		 * MEMDBG_LIST_delete(p) keeps our code knowing this is a lost pointer.
		 */
		p->mdbg_fpst = MEMFPOST;
		memcpy(p->mdbg_hdr2, cpMEMFPOST, 4);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
		{
			mem_size += p->mdbg_size;
			if (mem_size > max_mem_size)
				max_mem_size = mem_size;
			MEMDBG_LIST_add(p);
		}
		if (istiny)
			MEMDBG_tag_mem_from_alloc_tiny(p);
		return NULL;
	}
	/* NOTE, it is assumed that the memory will NOT be freed, so we simply drop
	   through, and allow normal realloc to work, and DO NOT try to put anything
	   onto the FREEDLIST, since it will just be the same block */
#endif
	p = (MEMDBG_HDR *) realloc(p, RESERVE_SZ + size + 4);
#ifdef MEMDBG_EXTRA_CHECKS
	/* on failure, really release hoarded freed blocks and retry (see MEMDBG_alloc) */
#ifdef _OPENMP
	{
		int i = 0;
		do {
#pragma omp critical (memdbg_crit)
			{
				if (!p && freed_mem_size > (RESERVE_SZ + size + 4) && !p && freed_cnt)
					i = 1;
			}
			if (i) {
				release_oldest_freed_block();
				p = (MEMDBG_HDR*)realloc(CLIENT_2_HDR(ptr), RESERVE_SZ + size + 4);
			}
		} while (i && !p);
	}
#else
	/* this is the 'right' block, but hard to do with the restrictions of no branching out that omp critical places on us */
	if (!p && freed_mem_size > (RESERVE_SZ + size + 4)) {
		while (!p && freed_cnt) {
			release_oldest_freed_block();
			p = (MEMDBG_HDR*)realloc(CLIENT_2_HDR(ptr), RESERVE_SZ + size + 4);
		}
	}
#endif
#endif
	if (!p)
	{
		/* We have to undo the MEMDBG_LIST_delete(p);  because realloc should
		 * leave the ORIGINAL buffer alone, if we can not allocate more
		 * memory.  Thus we need to 'leave' the leak alone.
		 */
		p = CLIENT_2_HDR(ptr); /* we have to get 'original' pointer again */
		p->mdbg_fpst = MEMFPOST;
		memcpy(p->mdbg_hdr2, cpMEMFPOST, 4);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
		{
			mem_size += p->mdbg_size;
			if (mem_size > max_mem_size)
				max_mem_size = mem_size;
			MEMDBG_LIST_add(p);
		}
		if (istiny)
			MEMDBG_tag_mem_from_alloc_tiny(p);
		return NULL;
	}
	/* success: refresh header fields and fences for the (possibly moved) block */
	p->mdbg_fpst = MEMFPOST;
	p->mdbg_size = size;
	p->mdbg_file = file;
	p->mdbg_line = line;
	p->mdbg_hdr2 = (MEMDBG_HDR2*)(((char*)p)+RESERVE_SZ + size);
	memcpy(p->mdbg_hdr2, cpMEMFPOST, 4);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		p->mdbg_cnt = ++alloc_cnt;
		mem_size += size;
		if (mem_size > max_mem_size)
			max_mem_size = mem_size;
		MEMDBG_LIST_add(p);
	}
	if (istiny)
		MEMDBG_tag_mem_from_alloc_tiny(p);
	return HDR_2_CLIENT(p);
}
/*
 * MEMDBG_strdup
 * Duplicate a ASCIIZ string in memory, with a protected call to strdup,
 * allocating extra data, and adding data to all required structures.
 * Returns NULL if the underlying allocation fails.
 */
char *MEMDBG_strdup(const char *str, char *file, int line)
{
	char * s;
	if ( ((signed long long)mem_size) < 0)
		/* Bug fix: strlen() returns size_t; the old "%ld" specifier is
		 * undefined behavior where long and size_t differ (e.g. LLP64
		 * Windows).  Cast and print as unsigned long long, matching the
		 * other diagnostics in this file. */
		fprintf(stderr, "MEMDBG_strdup(%llu) %s:%d  mem:%lld\n", (unsigned long long)strlen(str), file, line, (unsigned long long)mem_size);
	s = (char*)MEMDBG_alloc(strlen(str)+1, file, line);
	if (s != NULL)
		strcpy(s, str);
	return s;
}
/*
 * MEMDBG_free
 * Free a memory block, checking a lot of data, which would have been
 * set at allocation time.
 */
void MEMDBG_free(const void *ptr, char *file, int line)
{
	MEMDBG_HDR *p;
	int err=0, i;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		p = CLIENT_2_HDR(ptr);
		if (p->mdbg_fpst == MEMFPOSTt)
			mem_sizet -= p->mdbg_size;
		else if (p->mdbg_fpst == MEMFPOST) {
			mem_size -= p->mdbg_size;
			/* BUGFIX: the trailing fence-post check below used to live in a
			 * final `else` after an `else if (fpst != MEMFPOST)` branch and
			 * was therefore unreachable. A valid leading fence must still
			 * have its trailing fence verified to catch buffer overruns. */
			for (i = 0; i < 4; ++i)
				if (((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOST[i]) {
					err = 1;
					break;
				}
		}
		else
			err = 1; /* leading fence is neither valid marker: corrupt or dangling */
		if (err && p->mdbg_fpst == MEMFPOSTd)
			err = 2; /* block was already freed: double free / dangling pointer */
		MEMDBG_LIST_delete(p);
		p->mdbg_fpst = MEMFPOSTd;
		for (i = 0; i < 4; ++i)
			((char*)(p->mdbg_hdr2->mdbg_fpst))[i] = cpMEMFPOSTd[i];
	}
	if (err) {
		if (err == 2)
			mem_fence_post_errd(p, file, line);
		else
			mem_fence_post_err(p, file, line);
		return;
	}
#ifndef MEMDBG_EXTRA_CHECKS
	free(p);
#else
	MEMDBG_FREEDLIST_add(p);
#endif
	if ( ((signed long long)mem_size) < 0)
		/* cast to signed to match %lld (the old unsigned cast was a
		 * format/argument mismatch) */
		fprintf(stderr, "MEMDBG_free (end) %s:%d mem:%lld\n", file, line, (long long)mem_size);
}
#ifdef MEMDBG_EXTRA_CHECKS
/* NOTE, there is no LIST_delete() for the freed list. We only put
 * data onto this list, it is kept for full runtime. We may want to
 * later add some way for the app to clean it up, but for now, we
 * add it, and keep it all.
 */
/* Push a just-freed block onto the head of the freed list and poison its
 * payload so later use of a dangling pointer is easy to spot in a debugger.
 * Whole update runs inside one critical section: list links, counters and
 * the poison fill must be atomic with respect to other allocator calls. */
static void MEMDBG_FREEDLIST_add(MEMDBG_HDR *p)
{
	unsigned char *cp;
	size_t i;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		freed_mem_size += p->mdbg_size;
		++freed_cnt;
		p->mdbg_next = freed_memlist;
		p->mdbg_prev = NULL;
		if (freed_memlist != NULL)
			freed_memlist->mdbg_prev = p;
		freed_memlist = p;
		/* Ok, now poison the original data buffer (fill byte 0xCD) */
		cp = ((unsigned char*)p)+RESERVE_SZ;
		for (i = 0; i < p->mdbg_size; ++i)
			*cp++ = 0xCD;
	}
}
#endif
/*
 * these functions allow taking a memory snapshot,
 * calling some code, then validating that memory
 * is the same after the code. This will help
 * catch memory leaks and other such problems, within
 * formats and such. Simply get the snapshot,
 * run self tests (or other), when it exits, check
 * the snapshot to make sure nothing leaked.
 */
/* Capture the current allocation counters into a handle that can later be
 * compared against by MEMDBG_checkSnapshot(). `id` is an arbitrary caller
 * tag carried along unchanged. */
MEMDBG_HANDLE MEMDBG_getSnapshot(int id) {
	MEMDBG_HANDLE snap;
	snap.id = id;
	snap.alloc_cnt = alloc_cnt;
	snap.mem_size = mem_size;
	return snap;
}
/* Convenience wrapper: report leaks against the snapshot, but never exit
 * just because of a leak. */
void MEMDBG_checkSnapshot(MEMDBG_HANDLE h) {
	/* call the real function, but do not exit on leak */
	MEMDBG_checkSnapshot_possible_exit_on_error(h,0);
}
/* NOT needed to be thread safe, must be called from single threaded code */
void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE h, int exit_on_any_leaks) {
	/* ok, we do several things.
	 * 1 walk allocation chain, showing any memory 'newer' than in the handle (not tiny alloc stuff).
	 * 2 validate allocation chain (and free chain if in extra mode).
	 * if there were any errors in #2, then exit.
	 * if any memory leaks (#1) and exit_on_any_leaks true, we also exit. */
	MEMDBG_HDR *p = memlist;
	int leak = 0;
	/* first step, walk allocation list, looking for leaks */
	while (p) {
		if (p->mdbg_cnt > h.alloc_cnt && p->mdbg_fpst == MEMFPOST) {
			leak = 1;
			fprintf(stderr, "Mem leak: %llu bytes, alloc_num %d, file %s, line %d\n", (unsigned long long)p->mdbg_size, p->mdbg_cnt, p->mdbg_file, p->mdbg_line);
		}
		p = p->mdbg_next;
	}
	MemDbg_Validate_msg2(3, "MEMDBG_checkSnapshot", 0);
	/* BUGFIX: honor the exit_on_any_leaks flag. The old code exited on any
	 * leak, which made the parameter (and MEMDBG_checkSnapshot's documented
	 * "do not exit on leak" behavior) meaningless. */
	if (leak && exit_on_any_leaks) {
		exit(1);
	}
}
/* MUST be thread safe */
/* Re-tag a block that was handed out by the tiny allocator: move its
 * accounted size from the normal counters (mem_size) to the tiny-pool
 * counters (mem_sizet) and switch the leading fence marker to MEMFPOSTt.
 * Blocks whose fence is not the plain MEMFPOST are left untouched. */
void MEMDBG_tag_mem_from_alloc_tiny(void *ptr) {
	MEMDBG_HDR *p;
	p = CLIENT_2_HDR(ptr);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		if (p->mdbg_fpst == MEMFPOST) {
			p->mdbg_fpst = MEMFPOSTt;
			mem_size -= p->mdbg_size;
			mem_sizet += p->mdbg_size;
			if (mem_sizet > max_mem_sizet)
				max_mem_sizet = mem_sizet;
		}
	}
}
/* Report a fence-post corruption, dump the full allocation state, and
 * terminate the process (corrupted heap state is not recoverable). */
static void mem_fence_post_err_fp(void *p, const char *file, int line, char *fp, int line2)
{
	mem_fence_post_err_ne_fp(p, file, line,fp,line2);
	MemDbg_Display(stderr);
	exit(1);
}
/* Report use of a dangling (already freed) pointer, dump the allocation
 * state, and terminate the process. */
static void mem_fence_post_errd_fp(void *p, const char *file, int line, char *fp, int line2)
{
	mem_fence_post_errd_ne_fp(p, file, line,fp,line2);
	MemDbg_Display(stderr);
	exit(1);
}
/* Print a fence-post corruption diagnostic without exiting ("_ne" = no exit).
 * The first 16 bytes at p are shown twice: as printable ASCII (non-printable
 * bytes become '.') and as hex, so the clobbered fence can be inspected.
 * NOTE(review): the `fp` parameter is currently unused — confirm intent. */
static void mem_fence_post_err_ne_fp(void *p, const char *file, int line, char *fp, int line2)
{
	char buf[120];
	char *out = buf;
	const char *bytes = (const char *)p;
	int k;

	/* printable rendering */
	for (k = 0; k < 16; ++k)
		*out++ = (bytes[k] >= ' ' && bytes[k] <= '~') ? bytes[k] : '.';
	*out++ = ' ';
	/* hex rendering; sprintf NUL-terminates after the last byte */
	for (k = 0; k < 16; ++k)
		out += sprintf(out, " %02x", (unsigned char)bytes[k]);
	fprintf(stderr, "Memory fence_post error - %p - %s(%d) (%d)\n\tdata: (%s)\n", p, file, line, line2, buf);
}
/* Print a dangling-pointer diagnostic without exiting ("_ne" = no exit).
 * NOTE(review): the `fp` parameter is currently unused — confirm intent. */
static void mem_fence_post_errd_ne_fp(void *p, const char *file, int line, char *fp, int line2)
{
	fprintf(stderr, "Memory fence_postd error, using dangling pointer, memory already freed - %p - %s(%d) (%d)\n", p, file, line, line2);
}
#else
/* Build with memory debugging disabled: plain pass-through to free(). */
void MEMDBG_off_free(void *a) {
	free(a);
}
#endif /* MEMDBG_ON */
|
DetailedPlaceDB.h | /**
* @file DetailedPlaceDB.h
* @author Yibo Lin
* @date Jan 2019
*/
#ifndef _DREAMPLACE_UTILITY_DETAILEDPLACEDB_H
#define _DREAMPLACE_UTILITY_DETAILEDPLACEDB_H
#include "utility/src/Msg.h"
#include "utility/src/Box.h"
#include "legality_check/src/legality_check.h"
#include "draw_place/src/draw_place.h"
DREAMPLACE_BEGIN_NAMESPACE
/// @brief horizontal interval of free space in a row, [xl, xh]
template <typename T>
struct Space
{
    T xl; ///< left edge
    T xh; ///< right edge
};
/// @brief locate a cell inside bin2node_map: which bin, and position in that bin
struct BinMapIndex
{
    int bin_id; ///< index of the bin
    int sub_id; ///< index within the bin's node list
};
/// @brief locate a cell inside a row-to-node map: which row, and position in that row
struct RowMapIndex
{
    int row_id; ///< index of the row
    int sub_id; ///< index within the row's node list
};
/// @brief a wrapper class of required data for detailed placement
/// Holds raw pointers only; it does not own any of the arrays.
template <typename T>
struct DetailedPlaceDB
{
    typedef T type;

    // ---- cell geometry, indexed by node id (movable cells come first) ----
    const T* init_x;            ///< initial cell x positions (lower-left corners)
    const T* init_y;            ///< initial cell y positions (lower-left corners)
    const T* node_size_x;       ///< cell widths
    const T* node_size_y;       ///< cell heights
    const T* flat_region_boxes; ///< number of boxes x 4
    const int* flat_region_boxes_start; ///< number of regions + 1
    const int* node2fence_region_map; ///< length of number of movable cells
    T* x;                       ///< current cell x positions, updated in place
    T* y;                       ///< current cell y positions, updated in place

    // ---- netlist connectivity as flattened CSR-style arrays ----
    const int* flat_net2pin_map;        ///< pins of each net, flattened
    const int* flat_net2pin_start_map;  ///< per-net start offsets into flat_net2pin_map
    const int* pin2net_map;             ///< owning net of each pin
    const int* flat_node2pin_map;       ///< pins of each node, flattened
    const int* flat_node2pin_start_map; ///< per-node start offsets into flat_node2pin_map
    const int* pin2node_map;            ///< owning node of each pin
    const T* pin_offset_x;              ///< pin x offset relative to the node's lower-left corner
    const T* pin_offset_y;              ///< pin y offset relative to the node's lower-left corner
    const unsigned char* net_mask;      ///< nonzero => net participates in optimization

    // ---- layout region, site/row grid and bin grid ----
    T xl;          ///< layout left boundary
    T yl;          ///< layout bottom boundary
    T xh;          ///< layout right boundary
    T yh;          ///< layout top boundary
    T site_width;  ///< width of one placement site
    T row_height;  ///< height of one placement row
    T bin_size_x;  ///< bin width
    T bin_size_y;  ///< bin height
    int num_bins_x;
    int num_bins_y;
    int num_sites_x;
    int num_sites_y;
    int num_nodes;         ///< total cells (movable + fixed)
    int num_movable_nodes; ///< node ids >= num_movable_nodes are fixed
    int num_nets;
    int num_pins;
    int num_regions; ///< number of regions for flat_region_boxes and flat_region_boxes_start

    /// x coordinate -> site column index, clamped to [0, num_sites_x-1]
    inline int pos2site_x(T xx) const
    {
        int sx = (xx-xl)/site_width;
        sx = std::max(sx, 0);
        sx = std::min(sx, num_sites_x-1);
        return sx;
    }
    /// y coordinate -> row index, clamped to [0, num_sites_y-1]
    inline int pos2site_y(T yy) const
    {
        int sy = (yy-yl)/row_height;
        sy = std::max(sy, 0);
        sy = std::min(sy, num_sites_y-1);
        return sy;
    }
    /// @brief site index as an upper bound
    /// (exclusive end, clamped to [1, num_sites_x])
    inline int pos2site_ub_x(T xx) const
    {
        int sx = ceil((xx-xl)/site_width);
        sx = std::max(sx, 1);
        sx = std::min(sx, num_sites_x);
        return sx;
    }
    /// @brief site index as an upper bound
    /// (exclusive end, clamped to [1, num_sites_y])
    inline int pos2site_ub_y(T yy) const
    {
        int sy = ceil((yy-yl)/row_height);
        sy = std::max(sy, 1);
        sy = std::min(sy, num_sites_y);
        return sy;
    }
    /// x coordinate -> bin column index, clamped to [0, num_bins_x-1]
    inline int pos2bin_x(T xx) const
    {
        int bx = (xx-xl)/bin_size_x;
        bx = std::max(bx, 0);
        bx = std::min(bx, num_bins_x-1);
        return bx;
    }
    /// y coordinate -> bin row index, clamped to [0, num_bins_y-1]
    inline int pos2bin_y(T yy) const
    {
        int by = (yy-yl)/bin_size_y;
        by = std::max(by, 0);
        by = std::min(by, num_bins_y-1);
        return by;
    }
    /// clamp a box to the layout region (each edge clamped independently)
    inline void shift_box_to_layout(Box<T>& box) const
    {
        box.xl = std::max(box.xl, xl);
        box.xl = std::min(box.xl, xh);
        box.xh = std::max(box.xh, xl);
        box.xh = std::min(box.xh, xh);
        box.yl = std::max(box.yl, yl);
        box.yl = std::min(box.yl, yh);
        box.yh = std::max(box.yh, yl);
        box.yh = std::min(box.yh, yh);
    }
    /// convert a coordinate box into a site-index box
    inline Box<int> box2sitebox(const Box<T>& box) const
    {
        // xh, yh are exclusive
        Box<int> sitebox (
                pos2site_x(box.xl),
                pos2site_y(box.yl),
                pos2site_ub_x(box.xh),
                pos2site_ub_y(box.yh)
                );
        return sitebox;
    }
    /// convert a coordinate box into a bin-index box (all edges inclusive)
    inline Box<int> box2binbox(const Box<T>& box) const
    {
        Box<int> binbox (
                pos2bin_x(box.xl),
                pos2bin_y(box.yl),
                pos2bin_x(box.xh),
                pos2bin_y(box.yh)
                );
        return binbox;
    }
    /// @brief align x coordinate to site (floor to the nearest site boundary)
    inline T align2site(T xx) const
    {
        return floor((xx-xl)/site_width)*site_width+xl;
    }
    /// @brief compute optimal region for a cell
    /// The method to compute optimal region ignores the pin offsets of the target cell.
    /// If we want to consider the pin offsets, there may not be feasible box for the optimal region.
    /// Thus, this is just an approximate optimal region.
    /// When using the optimal region, one needs to refer to the center of the cell to the region, or the region completely covers the entire cell.
    Box<T> compute_optimal_region(int node_id) const
    {
        // start from an inverted box; pin positions of connected nets shrink-wrap it
        Box<T> box (
                std::numeric_limits<T>::max(),
                std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max()
                );
        for (int node2pin_id = flat_node2pin_start_map[node_id]; node2pin_id < flat_node2pin_start_map[node_id+1]; ++node2pin_id)
        {
            int node_pin_id = flat_node2pin_map[node2pin_id];
            int net_id = pin2net_map[node_pin_id];
            if (net_mask[net_id])
            {
                for (int net2pin_id = flat_net2pin_start_map[net_id]; net2pin_id < flat_net2pin_start_map[net_id+1]; ++net2pin_id)
                {
                    int net_pin_id = flat_net2pin_map[net2pin_id];
                    int other_node_id = pin2node_map[net_pin_id];
                    if (node_id != other_node_id) // skip the target cell's own pins
                    {
                        box.xl = std::min(box.xl, x[other_node_id]+pin_offset_x[net_pin_id]);
                        box.xh = std::max(box.xh, x[other_node_id]+pin_offset_x[net_pin_id]);
                        box.yl = std::min(box.yl, y[other_node_id]+pin_offset_y[net_pin_id]);
                        box.yh = std::max(box.yh, y[other_node_id]+pin_offset_y[net_pin_id]);
                    }
                }
            }
        }
        shift_box_to_layout(box);
        return box;
    }
    /// @brief compute HPWL for a net
    /// (half-perimeter wirelength of the net's pin bounding box; 0 for empty nets)
    T compute_net_hpwl(int net_id) const
    {
        Box<T> box (
                std::numeric_limits<T>::max(),
                std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max()
                );
        for (int net2pin_id = flat_net2pin_start_map[net_id]; net2pin_id < flat_net2pin_start_map[net_id+1]; ++net2pin_id)
        {
            int net_pin_id = flat_net2pin_map[net2pin_id];
            int other_node_id = pin2node_map[net_pin_id];
            box.xl = std::min(box.xl, x[other_node_id]+pin_offset_x[net_pin_id]);
            box.xh = std::max(box.xh, x[other_node_id]+pin_offset_x[net_pin_id]);
            box.yl = std::min(box.yl, y[other_node_id]+pin_offset_y[net_pin_id]);
            box.yh = std::max(box.yh, y[other_node_id]+pin_offset_y[net_pin_id]);
        }
        if (box.xl == std::numeric_limits<T>::max() || box.yl == std::numeric_limits<T>::max())
        {
            // net has no pins; box still inverted
            return (T)0;
        }
        return (box.xh-box.xl) + (box.yh-box.yl);
    }
    /// @brief compute HPWL for all nets
    /// (sums every net, including masked ones — see commented-out filter)
    T compute_total_hpwl() const
    {
        //dreamplacePrint(kDEBUG, "start compute_total_hpwl\n");
        T total_hpwl = 0;
        for (int net_id = 0; net_id < num_nets; ++net_id)
        {
            //if (net_mask[net_id])
            {
                total_hpwl += compute_net_hpwl(net_id);
            }
        }
        //dreamplacePrint(kDEBUG, "end compute_total_hpwl\n");
        return total_hpwl;
    }
    /// @brief distribute cells to rows
    /// A cell is added to every row it vertically overlaps; rows are then
    /// sorted, duplicate-covered fixed cells removed, and finally sorted by
    /// cell center. row2node_map must have num_sites_y entries.
    void make_row2node_map(const T* vx, const T* vy, std::vector<std::vector<int> >& row2node_map, int num_threads) const
    {
        // distribute cells to rows
        for (int i = 0; i < num_nodes; ++i)
        {
            //T node_xl = vx[i];
            T node_yl = vy[i];
            //T node_xh = node_xl+node_size_x[i];
            T node_yh = node_yl+node_size_y[i];
            // conservative row range; exact overlap is re-checked below
            int row_idxl = (node_yl-yl)/row_height;
            int row_idxh = ceil((node_yh-yl)/row_height)+1;
            row_idxl = std::max(row_idxl, 0);
            row_idxh = std::min(row_idxh, num_sites_y);
            for (int row_id = row_idxl; row_id < row_idxh; ++row_id)
            {
                T row_yl = yl+row_id*row_height;
                T row_yh = row_yl+row_height;
                if (node_yl < row_yh && node_yh > row_yl) // overlap with row
                {
                    row2node_map[row_id].push_back(i);
                }
            }
        }
        // sort cells within rows
        // it is safer to sort by center
        // sometimes there might be cells with 0 sizes
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads) schedule(dynamic, 1)
#endif
        for (int i = 0; i < num_sites_y; ++i)
        {
            auto& row2nodes = row2node_map[i];
            // sort cells within rows according to left edges
            std::sort(row2nodes.begin(), row2nodes.end(),
                    [&] (int node_id1, int node_id2) {
                        T x1 = vx[node_id1];
                        T x2 = vx[node_id2];
                        // node id breaks ties for a strict weak ordering
                        return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
                    });
            // After sorting by left edge,
            // there is a special case for fixed cells where
            // one fixed cell is completely within another in a row.
            // This will cause failure to detect some overlaps.
            // We need to remove the "small" fixed cell that is inside another.
            if (!row2nodes.empty())
            {
                std::vector<int> tmp_nodes;
                tmp_nodes.reserve(row2nodes.size());
                tmp_nodes.push_back(row2nodes.front());
                for (int j = 1, je = row2nodes.size(); j < je; ++j)
                {
                    int node_id1 = row2nodes.at(j-1);
                    int node_id2 = row2nodes.at(j);
                    // two fixed cells
                    if (node_id1 >= num_movable_nodes && node_id2 >= num_movable_nodes)
                    {
                        T xl1 = vx[node_id1];
                        T xl2 = vx[node_id2];
                        T width1 = node_size_x[node_id1];
                        T width2 = node_size_x[node_id2];
                        T xh1 = xl1 + width1;
                        T xh2 = xl2 + width2;
                        // only collect node_id2 if its right edge is righter than node_id1
                        if (xh1 < xh2)
                        {
                            tmp_nodes.push_back(node_id2);
                        }
                    }
                    else
                    {
                        tmp_nodes.push_back(node_id2);
                    }
                }
                row2nodes.swap(tmp_nodes);
                // sort according to center
                std::sort(row2nodes.begin(), row2nodes.end(),
                        [&] (int node_id1, int node_id2) {
                            T x1 = vx[node_id1] + node_size_x[node_id1]/2;
                            T x2 = vx[node_id2] + node_size_x[node_id2]/2;
                            return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
                        });
            }
        }
    }
    /// @brief distribute movable cells to bins
    /// Each movable cell is binned by its center; bin ids are laid out
    /// column-major (bx*num_bins_y + by). Also fills the reverse map.
    void make_bin2node_map(const T* host_x, const T* host_y,
            const T* host_node_size_x, const T* host_node_size_y,
            std::vector<std::vector<int> >& bin2node_map, std::vector<BinMapIndex>& node2bin_map) const
    {
        // construct bin2node_map
        for (int i = 0; i < num_movable_nodes; ++i)
        {
            int node_id = i;
            T node_x = host_x[node_id] + host_node_size_x[node_id]/2;
            T node_y = host_y[node_id] + host_node_size_y[node_id]/2;
            int bx = std::min(std::max((int)((node_x-xl)/bin_size_x), 0), num_bins_x-1);
            int by = std::min(std::max((int)((node_y-yl)/bin_size_y), 0), num_bins_y-1);
            int bin_id = bx*num_bins_y+by;
            //int sub_id = bin2node_map.at(bin_id).size();
            bin2node_map.at(bin_id).push_back(node_id);
        }
        // construct node2bin_map
        for (unsigned int bin_id = 0; bin_id < bin2node_map.size(); ++bin_id)
        {
            for (unsigned int sub_id = 0; sub_id < bin2node_map[bin_id].size(); ++sub_id)
            {
                int node_id = bin2node_map[bin_id][sub_id];
                BinMapIndex& bm_idx = node2bin_map.at(node_id);
                bm_idx.bin_id = bin_id;
                bm_idx.sub_id = sub_id;
            }
        }
#ifdef DEBUG
        int max_num_nodes_per_bin = 0;
        for (unsigned int i = 0; i < bin2node_map.size(); ++i)
        {
            max_num_nodes_per_bin = std::max(max_num_nodes_per_bin, (int)bin2node_map[i].size());
        }
        printf("[D] max_num_nodes_per_bin = %d\n", max_num_nodes_per_bin);
#endif
    }
    /// @brief check whether placement is legal
    bool check_legality() const
    {
        return legalityCheckKernelCPU(
                x, y,
                node_size_x, node_size_y,
                flat_region_boxes, flat_region_boxes_start, node2fence_region_map,
                xl, yl, xh, yh,
                site_width, row_height,
                num_nodes,
                num_movable_nodes,
                num_regions
                );
    }
    /// @brief check whether a cell is within its fence region
    /// Returns true if the cell area at (xx, yy) is fully covered by the
    /// boxes of its fence region, or if the cell has no fence region
    /// (region_id >= num_regions).
    bool inside_fence(int node_id, T xx, T yy) const
    {
        T node_xl = xx;
        T node_yl = yy;
        T node_xh = node_xl + node_size_x[node_id];
        T node_yh = node_yl + node_size_y[node_id];
        bool legal_flag = true;
        int region_id = node2fence_region_map[node_id];
        if (region_id < num_regions)
        {
            int box_bgn = flat_region_boxes_start[region_id];
            int box_end = flat_region_boxes_start[region_id + 1];
            T node_area = (node_xh - node_xl) * (node_yh - node_yl);
            // I assume there is no overlap between boxes of a region
            // otherwise, preprocessing is required
            for (int box_id = box_bgn; box_id < box_end; ++box_id)
            {
                int box_offset = box_id*4;
                T box_xl = flat_region_boxes[box_offset];
                T box_yl = flat_region_boxes[box_offset + 1];
                T box_xh = flat_region_boxes[box_offset + 2];
                T box_yh = flat_region_boxes[box_offset + 3];

                // intersection area between the cell and this box
                T dx = std::max(std::min(node_xh, box_xh) - std::max(node_xl, box_xl), (T)0);
                T dy = std::max(std::min(node_yh, box_yh) - std::max(node_yl, box_yl), (T)0);
                T overlap = dx*dy;
                if (overlap > 0)
                {
                    node_area -= overlap;
                }
            }
            if (node_area > 0) // not consumed by boxes within a region
            {
                legal_flag = false;
            }
        }
        return legal_flag;
    }
    /// @brief draw placement
    /// Renders the current placement (x, y) to an image file.
    void draw_place(const char* filename) const
    {
        drawPlaceLauncher<T>(
                x, y,
                node_size_x, node_size_y,
                pin_offset_x, pin_offset_y,
                pin2node_map,
                num_nodes,
                num_movable_nodes,
                0,
                flat_net2pin_start_map[num_nets],
                xl, yl, xh, yh,
                site_width, row_height,
                bin_size_x, bin_size_y,
                filename
                );
    }
};
DREAMPLACE_END_NAMESPACE
#endif
|
GB_unaryop__abs_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_int16
// op(A') function: GB_tran__abs_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each int16_t entry of Ax to uint8_t and store it in Cx. The "abs"
// operator is the identity here (see "unaryop: cij = aij" above); all work
// is done by the GB_CAST_OP macro. Parallelized with a static OpenMP schedule.
GrB_Info GB_unop__abs_uint8_int16
(
    uint8_t *restrict Cx,       // output array, anz entries
    const int16_t *restrict Ax, // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros defined above.
GrB_Info GB_tran__abs_uint8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice, // presumably defines how A is sliced across naslice tasks — see template
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
surrogate.h | /* Software SPAMS v2.4 - Copyright 2009-2013 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SURROGATE_H
#define SURROGATE_H
#include <fista.h>
// NOTE(review): non-static global definitions in a header; including this
// header from more than one translation unit will produce duplicate-symbol
// link errors. Consider `static` or moving the definitions to a .cpp file.
// TODO confirm how this header is consumed before changing.
Timer timer;
Timer timer2;
Timer timer3;
using namespace FISTA;
/// Configuration bundle for the stochastic/incremental surrogate solvers.
template <typename T> struct ParamSurrogate {
      /// defaults: single thread, 100 iterations/epochs, batch size 1,
      /// cyclic sampling, automatic step-size selection
      ParamSurrogate() {
         num_threads=1;
         iters=100;
         epochs=100;
         minibatches=1;
         normalized=false;
         weighting_mode=0;
         averaging_mode=0;
         determineEta=true;
         eta=1;
         t0=0;
         verbose=false;
         random=false;
         optimized_solver=true;
         strategy=0;
      };
      ~ParamSurrogate() { };
      /// print a short summary of the sampling configuration to stderr
      void print() const {
         if (random) cerr << "Randomized Sampling" << endl;
         if (minibatches > 1) cerr << "Mini-batches of size " << minibatches << endl;
         if (averaging_mode) cerr << "With averaging" << endl;
         cerr << "Weighting Scheme " << weighting_mode << endl;
      };
      int num_threads;      ///< number of threads used by the solver
      int iters;            ///< number of iterations
      int epochs;           ///< number of passes over the data
      int minibatches;      ///< mini-batch size
      bool normalized;      ///< true if the data columns are normalized
      int weighting_mode;   ///< weighting scheme selector
      int averaging_mode;   ///< nonzero enables iterate averaging
      bool determineEta;    ///< automatically determine the step size
      bool optimized_solver;///< use the optimized solver variant
      T eta;                ///< step size (initial value when determineEta)
      T t0;                 // presumably a step-size decay offset — TODO confirm against solver code
      bool verbose;         ///< verbose logging
      bool random;          ///< randomized sampling instead of cyclic order
      int strategy;         ///< surrogate update strategy selector
};
/// Smooth empirical loss f(w) = (1/n) sum_i loss(y_i, <w, x_i>) over a data
/// matrix accessed column-by-column. T is the scalar type; U is the data
/// matrix type (columns are samples). Subclasses supply the pointwise loss
/// via eval_simple()/gradient_simple().
template <typename T, typename U>
class SmoothFunction {
   public:
      /// genericL is a bound on the loss curvature; per-sample Lipschitz
      /// constants _L[i] = genericL * ||x_i||^2 are precomputed unless the
      /// data is flagged as normalized.
      SmoothFunction(const U& Xt, const Vector<T>& y,
            const bool is_normalized=false,
            const int nbatches=1,
            const bool random = false, const T genericL= T(1.0)) :
         _Xt(&Xt), _y(&y), _is_normalized(is_normalized),
         _n(Xt.n()),
         _p(Xt.m()),
         _nbatches(nbatches),
         _sizebatch(nbatches),
         _random(random),
         _counter(0),
         _save_n(Xt.n()),
         _genericL(genericL),
         _constantL(is_normalized) {
            _current_batch.resize(nbatches);
            _num_batches = MAX(_n /_nbatches,1);
            if (!is_normalized) {
               _L.resize(_n);
               typename U::col spw;
               for (int i = 0; i<_n; ++i) {
                  _Xt->refCol(i,spw);
                  _L[i]=_genericL*spw.nrm2sq();
               }
               // average per-sample constant becomes the generic one
               _genericL = _L.sum()/this->_n;
            }
         };
      virtual ~SmoothFunction() { };
      /// evaluate the function value on the full dataset
      inline T eval(const Vector<T>& input) const {
         typename U::col spw;
         T tmp=0;
         for (int i = 0; i<_n; ++i) {
            _Xt->refCol(i,spw);
            const T y= (*_y)[i];
            const T s = input.dot(spw);
            tmp += this->eval_simple(y,s);
         }
         return tmp / _n;
      };
      /// evaluate the (unnormalized) loss summed over the current mini-batch
      inline T eval_block(const Vector<T>& input) const {
         typename U::col spw;
         T tmp=0;
         for (int i = 0; i<_sizebatch; ++i) {
            const int ind = _current_batch[i];
            _Xt->refCol(ind,spw);
            const T y= (*_y)[ind];
            const T s = input.dot(spw);
            tmp += this->eval_simple(y,s);
         }
         return tmp;
      };
      // rho = (1-w)rho + w (rho_sample)
      // output = (1-w)output + w(input - (1/rho) nabla(input))
      /// Stochastic surrogate update on the current mini-batch; rho is
      /// updated in place unless the data is normalized (constant rho).
      inline void add_sample_gradient(const Vector<T>& input,
            Vector<T>& z, T& rho, const T w) {
         // update new_rho => (1-w)rho + w (rho_sample)
         if (_is_normalized) {
            // rho is already equal to genericL
            typename U::col spw;
            z.scal(T(1.0)-w);
            z.add(input,w);
            const T scal = w/(rho*_sizebatch);
            for (int i = 0; i<_sizebatch; ++i) {
               const int ind = _current_batch[i];
               _Xt->refCol(ind,spw);
               const T y= (*_y)[ind];
               const T s = input.dot(spw);
               z.add(spw,-scal*this->gradient_simple(y,s));
            }
         } else {
            // average the per-sample constants of the batch, floored at
            // 10% of the running rho to avoid collapsing the step
            T rho_sample=0;
            for (int i = 0; i<_sizebatch; ++i) {
               rho_sample += _L[_current_batch[i]];
            }
            rho_sample /= _sizebatch;
            rho_sample=MAX(rho_sample,T(0.1)*rho);
            const T new_rho=(1-w)*rho + w*rho_sample;
            //const T new_rho=rho;
            typename U::col spw;
            z.scal(rho*(T(1.0)-w)/new_rho);
            z.add(input,rho_sample*w/new_rho);
            const T scal = w/(_sizebatch*new_rho);
            for (int i = 0; i<_sizebatch; ++i) {
               const int ind = _current_batch[i];
               _Xt->refCol(ind,spw);
               const T y= (*_y)[ind];
               const T s = input.dot(spw);
               z.add(spw,-scal*this->gradient_simple(y,s));
            }
            rho=new_rho;
         }
      };
      /// z = rho*input - sum over batch of gradient terms (no averaging)
      inline void add_sample_gradient2(const Vector<T>& input,
            Vector<T>& z, const T rho) {
         typename U::col spw;
         z.copy(input);
         z.scal(rho);
         for (int i = 0; i<_sizebatch; ++i) {
            const int ind = _current_batch[i];
            _Xt->refCol(ind,spw);
            const T y= (*_y)[ind];
            const T s = input.dot(spw);
            z.add(spw,-this->gradient_simple(y,s));
         }
      };
      /// SAG-style update: output1 accumulates the change in per-sample
      /// gradients, output2 stores the latest per-sample gradient scalars,
      /// val returns the batch loss.
      virtual void add_sample_gradient3(const Vector<T>& input,
            Vector<T>& output1, Vector<T>& output2, T& val) {
         typename U::col spw;
         val=0;
         for (int i = 0; i<_sizebatch; ++i) {
            const int ind = _current_batch[i];
            _Xt->refCol(ind,spw);
            const T y= (*_y)[ind];
            const T s = input.dot(spw);
            val+=this->eval_simple(y,s);
            const T s2 = -this->gradient_simple(y,s);
            output1.add(spw,s2-output2[ind]);
            output2[ind]=s2;
         }
      };
      /// input += scal * grad(scalin * input) over the batch; optionally
      /// accumulate the same step scaled by scal2 into the averaging vector av
      void inline add_scal_grad(Vector<T>& input, const T scalin, const T scal, Vector<T>& av, const T scal2 = 0) {
         _tmp.resize(_sizebatch);
         typename U::col spw;
         if (_sizebatch==1) {
            // fast path: single sample, no temporary dot-product buffer needed
            const int ind = _current_batch[0];
            _Xt->refCol(ind,spw);
            const T s = scalin*input.dot(spw);
            const T y= (*_y)[ind];
            const T s2 = this->gradient_simple(y,s);
            input.add(spw,scal*s2);
            if (scal2) av.add(spw,scal2*s2);
         } else {
            // compute all dot products first, since `input` is modified below
            for (int i = 0; i<_sizebatch; ++i) {
               const int ind = _current_batch[i];
               _Xt->refCol(ind,spw);
               _tmp[i] = scalin*input.dot(spw);
            }
            const T scalb = scal/_sizebatch;
            const T scal2b = scal2/_sizebatch;
            for (int i = 0; i<_sizebatch; ++i) {
               const int ind = _current_batch[i];
               const T y= (*_y)[ind];
               _Xt->refCol(ind,spw);
               const T s2 = this->gradient_simple(y,_tmp[i]);
               input.add(spw,scalb*s2);
               if (scal2) av.add(spw,scal2b*s2);
            }
         }
      };
      /// gradient of the pointwise loss at score s for the first batch sample
      inline T scal_grad(const T s) {
         const int ind = _current_batch[0];
         const T y= (*_y)[ind];
         return this->gradient_simple(y,s);
      }
      /// -sum over batch of stats[ind] * <x_ind, input>
      virtual T dotprod_gradient3(const Vector<T>& input,
            Vector<T>& stats) {
         typename U::col spw;
         T val=0;
         for (int i = 0; i<_sizebatch; ++i) {
            const int ind = this->_current_batch[i];
            _Xt->refCol(ind,spw);
            val-= stats[ind]*spw.dot(input);
         }
         return val;
      };
      /// pointwise loss value for label y and score s (subclass-defined)
      virtual T eval_simple(const T y, const T s) const = 0;
      /// derivative of the pointwise loss w.r.t. s (subclass-defined)
      virtual T gradient_simple(const T y, const T s) const = 0;
      /// compute a global constant L
      inline T genericL() const { return _genericL; };
      /// reference the data column of the first sample in the current batch
      virtual void refData(typename U::col& output) {
         const int ind = _current_batch[0];
         _Xt->refCol(ind,output);
      }
      /// label of the first sample in the current batch
      virtual T getY() const {
         const int ind = _current_batch[0];
         return (*_y)[ind];
      }
      /// subsample the dataset
      virtual void subsample(const int n) {
         _save_n=_n;
         _counter=0;
         _n = MIN(n,_n);
         _num_batches = MAX(_n /_nbatches,1);
      };
      /// restore the full dataset
      virtual void un_subsample() {
         _n = _save_n;
         _counter=0;
         _num_batches = MAX(_n /_nbatches,1);
      };
      inline int n() const { return _n; };  ///< number of (active) samples
      inline int p() const { return _p; };  ///< dimension of the parameter vector
      inline bool constantL() const { return _constantL; };
      void inline setRandom(const bool random) { _random=random;};
      /// change the mini-batch size and reset the batch bookkeeping
      void inline setMiniBatches(const int nbatches) {
         _nbatches=nbatches;
         _sizebatch=_nbatches;
         _current_batch.resize(nbatches);
         _num_batches = MAX(_n /_nbatches,1);
      };
      int inline nbatches() const { return _nbatches; };
      int inline num_batches() const { return _num_batches; };
      int inline num_batch() const { return _num_batch; };
      /// choose a new sample
      /// (fills _current_batch; libc random() when randomized, otherwise cyclic)
      void inline choose_random_batch() {
         if (_random) {
            for (int i = 0; i<_nbatches; ++i) // size of the mini batches
               _current_batch[i]=random() % _n;
         } else {
            for (int i = 0; i<_nbatches; ++i)
               _current_batch[i]= _counter++ % _n;
         }
      };
      virtual int get_batch() const { return _current_batch[0]; };
      /// pick one of the fixed contiguous mini-batches; the last batch
      /// absorbs the leftover samples when _n is not divisible by _nbatches
      int inline choose_random_fixedbatch() {
         const int size_lastbatch =_nbatches+(_n-_nbatches*_num_batches);
         _num_batch= (_random ? random() : _counter++) % _num_batches;
         _current_batch.resize(size_lastbatch);
         _sizebatch = _num_batch == _num_batches-1 ? size_lastbatch : _nbatches;
         for (int i = 0; i<_sizebatch; ++i) // size of the mini batches
            _current_batch[i]=_num_batch*_nbatches+i;
         return _num_batch;
      };
      /// per-fixed-batch Lipschitz constants (summed over each batch)
      void inline getL(Vector<T>& stats) {
         if (_constantL) {
            for (int i = 0; i<_num_batches-1; ++i)
               stats[i]=_genericL*_nbatches;
            const int size_lastbatch =_nbatches+(_n-_nbatches*_num_batches);
            stats[_num_batches-1]=_genericL*size_lastbatch;
         } else {
            for (int i = 0; i<_num_batches; ++i) {
               stats[i]=0;
               for (int j = 0; j<_nbatches; ++j) {
                  stats[i]+=_L[i*_nbatches+j];
               }
            }
            // leftover samples all belong to the last batch
            const int size_lastbatch =_nbatches+(_n-_nbatches*_num_batches);
            for (int j = _nbatches; j<size_lastbatch; ++j)
               stats[_num_batches-1]+=_L[(_num_batches-1)*_nbatches+j];
         }
      };
      /// compute the constant L for the current sample
      inline T sampleL() const {
         T rho_sample=0;
         for (int i = 0; i<_sizebatch; ++i) {
            rho_sample += _L[_current_batch[i]];
         }
         return rho_sample / _sizebatch;
      };
   private:
      // non-copyable
      explicit SmoothFunction<T,U>(const SmoothFunction<T,U>& dict);
      SmoothFunction<T,U>& operator=(const SmoothFunction<T,U>& dict);
   protected:
      const U* _Xt;        ///< data matrix, columns are samples (not owned)
      const Vector<T>* _y; ///< labels/targets (not owned)
      T _is_normalized;    // NOTE(review): used as a boolean flag but declared T — probably should be bool
      int _n;              ///< current (possibly subsampled) number of samples
      int _p;              ///< parameter dimension
      int _nbatches;       ///< nominal mini-batch size
      int _sizebatch;      ///< size of the current batch (last fixed batch may be larger)
      bool _random;        ///< randomized vs cyclic sampling
      int _counter;        ///< cyclic sampling position
      int _save_n;         ///< full n saved across subsample()/un_subsample()
      T _genericL;         ///< global curvature constant
      bool _constantL;     ///< true when all samples share _genericL
      int _num_batches;    ///< number of fixed mini-batches
      int _num_batch;      ///< index of the current fixed mini-batch
      Vector<T> _L;        ///< per-sample Lipschitz constants
      Vector<T> _tmp;      ///< scratch buffer for add_scal_grad
      Vector<int> _current_batch; ///< sample indices of the current batch
};
/// Logistic loss specialization; curvature bound 0.25 (the maximum second
/// derivative of the logistic loss).
template <typename T, typename U>
class LogisticFunction : public SmoothFunction<T, U > {
   public:
      LogisticFunction(const U& Xt, const Vector<T>& y,
            const bool is_normalized=false, const int nbatches=1, const bool random = false) :
         SmoothFunction<T,U>(Xt,y,is_normalized,nbatches,random,T(0.25)) { };
      virtual ~LogisticFunction() { };
      /// logistic loss of margin y*s, computed via the project helper logexp2
      virtual T inline eval_simple(const T y, const T s) const {
         return logexp2<T>(-y*s);
      };
      /// derivative of the logistic loss w.r.t. s
      virtual T inline gradient_simple(const T y, const T s) const {
         return -y/(T(1.0)+exp_alt<T>(y*s));
      };
   private:
      // non-copyable
      explicit LogisticFunction<T,U>(const LogisticFunction<T,U>& dict);
      LogisticFunction<T,U>& operator=(const LogisticFunction<T,U>& dict);
};
/// Squared loss specialization: loss(y, s) = 0.5 (y - s)^2, whose second
/// derivative is identically 1, hence the curvature bound T(1.0).
template <typename T, typename U>
class SquareFunction : public SmoothFunction<T, U > {
   public:
      SquareFunction(const U& Xt, const Vector<T>& y,
            const bool is_normalized=false, const int nbatches=1, const bool random = false) :
         SmoothFunction<T,U>(Xt,y,is_normalized,nbatches,random,T(1.0)) { };
      virtual ~SquareFunction() { };
      /// half squared residual
      virtual T inline eval_simple(const T y, const T s) const {
         const T residual = y-s;
         return T(0.5)*residual*residual;
      };
      /// derivative of the squared loss w.r.t. s
      virtual T inline gradient_simple(const T y, const T s) const {
         return s-y;
      };
   private:
      // non-copyable
      explicit SquareFunction<T,U>(const SquareFunction<T,U>& dict);
      SquareFunction<T,U>& operator=(const SquareFunction<T,U>& dict);
};
// T is double or float
// D is the input type (vector or matrices)
// U is the data type (Matrix or SpMatrix)
/// Abstract interface for online (stochastic) surrogate-minimization
/// schemes: repeatedly update a surrogate model around the current iterate
/// and minimize it.
template <typename T, typename U>
class OnlineSurrogate {
   public:
      OnlineSurrogate() { };
      virtual ~OnlineSurrogate() { };
      /// blend the surrogate toward the model at `input` with weight `weight`
      virtual void update_surrogate(const Vector<T>& input, const T weight) = 0;
      /// objective value at `input`
      virtual T eval_function(const Vector<T>& input) = 0;
      /// write the minimizer of the current surrogate into `output`
      virtual void minimize_surrogate(Vector<T>& output) = 0;
      /// restrict to the first n samples / restore the full dataset
      virtual void subsample(const int n) = 0;
      virtual void un_subsample() = 0;
      /// initialize the surrogate state at `input`
      virtual void initialize(const Vector<T>& input) = 0;
      virtual int n() const = 0;
      virtual int num_batches() const = 0;
      virtual void setRandom(const bool random) = 0;
      /// optional diagnostic hook; default does nothing
      virtual void print_aux() { };
   private:
      // non-copyable
      explicit OnlineSurrogate<T,U>(const OnlineSurrogate<T,U>& dict);
      OnlineSurrogate<T,U>& operator=(const OnlineSurrogate<T,U>& dict);
};
/// Abstract interface for incremental (MISO-style) surrogates that keep
/// per-batch state and update one batch's contribution at a time.
template <typename T, typename U>
class IncrementalSurrogate : public OnlineSurrogate<T,U> {
   public:
      IncrementalSurrogate() : _first_pass(true) { };
      virtual ~IncrementalSurrogate() { };
      /// replace the stored surrogate contribution of the current batch
      virtual void update_incremental_surrogate(const Vector<T>& input) = 0;
      /// set up per-batch state at `input` for the chosen update strategy
      virtual void initialize_incremental(const Vector<T>& input, const int strategy = 0) = 0;
      /// write the minimizer of the aggregated incremental surrogate
      virtual void minimize_incremental_surrogate(Vector<T>& output) = 0;
      virtual T get_param() const = 0;
      virtual T rho() const = 0;
      virtual void set_param(const T param) = 0;
      /// optional hook for strongly convex problems; default does nothing
      virtual void set_param_strong_convexity() { };
      /// mark whether the solver is still in its first pass over the data
      inline void setFirstPass(const bool pass) { _first_pass = pass; };
      virtual T get_diff() const = 0;
      virtual T get_scal_diff() const = 0; //{ return 0; };
      virtual void reset_diff() =0 ;
   protected:
      int _strategy;    ///< update strategy selector (see subclasses)
      bool _first_pass; ///< true until every batch has been visited once
   private:
      // non-copyable
      explicit IncrementalSurrogate<T,U>(const IncrementalSurrogate<T,U>& dict);
      IncrementalSurrogate<T,U>& operator=(const IncrementalSurrogate<T,U>& dict);
};
/// Quadratic (Lipschitz-gradient) surrogate of a smooth function, usable both
/// as an online surrogate (update/minimize) and as an incremental one with
/// per-batch stored statistics. The surrogate minimizer is maintained through
/// the aggregated vector _z (and _z2/_stats3/_stats4 for strategy 3).
template <typename T, typename U>
class QuadraticSurrogate : public IncrementalSurrogate<T,U> {
public:
QuadraticSurrogate(SmoothFunction<T,U>* function) :
_function(function) { _rho=T(1.0); _z.resize(function->p()); _scalL=T(1.0); }
virtual ~QuadraticSurrogate() { };
// Online update with step weight `weight` on a fresh random batch.
virtual void inline update_surrogate(const Vector<T>& input, const T weight) {
_function->choose_random_batch();
// z = (1-w)output + w(input - (1/rho) nabla(input))
_function->add_sample_gradient(input,_z,_rho,weight);
};
virtual T eval_function(const Vector<T>& input) {
return _function->eval(input);
};
// Unconstrained quadratic: minimizer is _z itself.
virtual void minimize_surrogate(Vector<T>& output) {
output.copy(_z);
};
virtual void initialize(const Vector<T>& input) {
this->_rho = _function->genericL();
_z.copy(input);
};
virtual void subsample(const int n) { _function->subsample(n); };
virtual void un_subsample() { _function->un_subsample(); };
virtual int n() const { return _function->n(); };
virtual T rho() const { return _rho; };
virtual int num_batches() const { return _function->num_batches(); };
virtual void setRandom(const bool random) { _function->setRandom(random); };
/// incremental part
// Replace the stored contribution of one random batch by its contribution at
// `input`, keeping the aggregate _z consistent (MISO-style update).
virtual void update_incremental_surrogate(const Vector<T>& input) {
const int num_batch = _function->choose_random_fixedbatch();
Vector<T> z_old;
_stats2.refCol(num_batch,z_old);
const T rho_old=_stats[num_batch];
if (this->_strategy <= 2 || this->_strategy == 4) {
//if (!this->_first_pass)
// Swap out the old per-batch term and swap in the new one.
_z.sub(z_old);
_function->add_sample_gradient2(input,z_old,rho_old*_scalL);
_z.add(z_old);
} else {
// Strategy 3: additionally track surrogate-vs-function gaps (_diff, _diffb)
// used by the caller to rescale _scalL.
T old_value=0;
T old_valueb=0;
if (!this->_first_pass) {
_z.add(z_old,-rho_old);
_z3.copy(input);
_z3.sub(z_old);
//old_value = _stats4[num_batch] + _function->dotprod_gradient3(_z3,_stats3)+0.5*rho_old*_scalL*_z3.nrm2sq();
old_value = _stats4[num_batch] + _function->dotprod_gradient3(_z3,_stats3); // f_old+ nabla f(old)'( new-old)
old_valueb=T(0.5)*rho_old*_z3.nrm2sq();
}
z_old.copy(input);
_z.add(z_old,rho_old);
_function->add_sample_gradient3(input,_z2,_stats3,_stats4[num_batch]);
if (!this->_first_pass) {
_diff += (old_value-_stats4[num_batch]); // should be non-positive (convexity inequality)
_diffb+=old_valueb;
// _diff += (old_value-_stats4[num_batch] > 0 ? T(1.0) : -T(1.0));
}
}
};
// Size and zero all per-batch statistics; _z starts as a scaled copy of `input`.
virtual void initialize_incremental(const Vector<T>& input, const int strategy) {
const int p = static_cast<int>(input.n());
this->_strategy = strategy;
// _z.resize(p);
// _z.setZeros();
this->_rho = _function->n()*_function->genericL();
_z.copy(input);
_z.scal(_rho*_scalL);
const int num_batches = _function->num_batches();
const int n = _function->n();
this->_stats.resize(num_batches);
_function->getL(this->_stats);
this->_stats2.resize(p,num_batches,false);
Vector<T> col;
// Each batch initially owns an equal share of _z.
for (int i = 0; i<num_batches; ++i) {
this->_stats2.refCol(i,col);
col.copy(_z);
col.scal(T(1.0)/num_batches);
}
if (strategy == 3) {
this->_stats3.resize(n);
this->_stats3.setZeros();
this->_stats4.resize(num_batches);
this->_stats4.setZeros();
_z2.resize(p);
_z2.setZeros();
}
};
// Recover the minimizer from the aggregated statistics (rescaling differs by strategy).
virtual void minimize_incremental_surrogate(Vector<T>& output) {
if (this->_strategy <= 2 || this->_strategy==4) {
output.copy(_z);
output.scal(T(1.0)/(_scalL*_rho));
} else {
output.copy(_z);
output.add(_z2,T(1.0)/_scalL);
output.scal(T(1.0)/(_rho));
}
};
inline T get_param() const { return _scalL; };
inline void set_param(const T param) { _scalL = param; };
// Suggested multiplicative correction for _scalL (strategy 3 heuristic).
virtual T get_scal_diff() const {
return -(_diff/_diffb)/_scalL;
};
virtual T get_diff() const {
return _diff+_scalL*_diffb;
};
virtual void reset_diff() { _diff=0; _diffb=0; };
private:
// Non-copyable.
explicit QuadraticSurrogate<T,U>(const QuadraticSurrogate<T,U>& dict);
QuadraticSurrogate<T,U>& operator=(const QuadraticSurrogate<T,U>& dict);
protected:
SmoothFunction<T,U>* _function; // contains all the data
T _rho;
Vector<T> _z;
Vector<T> _z2;
Vector<T> _z3;
T _scalL;
Vector<T> _stats; // rho_i
Matrix<T> _stats2; // Theta_i (rho_i Theta_i - nabla f_i if strategy <= 2)
Vector<T> _stats3; // -nabla f_i not used if strategy <= 2
Vector<T> _stats4; // last surrogate constant not used if strategy <= 2
T _diff;
T _diffb;
};
/// Quadratic surrogate plus a (possibly non-smooth) regularizer handled by a
/// proximal operator; specializes the minimization steps with closed forms for
/// RIDGE and L1, and a generic prox call otherwise.
template <typename T, typename U>
class ProximalSurrogate : public QuadraticSurrogate<T,U> {
public:
ProximalSurrogate(SmoothFunction<T,U>* function,
Regularizer<T>* prox, const T lambda) :
QuadraticSurrogate<T,U>(function),
_prox(prox), _lambda(lambda) { };
// Composite objective: smooth part + lambda * regularizer.
virtual T eval_function(const Vector<T>& input) {
return this->_function->eval(input)+_lambda*_prox->eval(input);
};
virtual void minimize_surrogate(Vector<T>& output) {
_prox->prox(this->_z,output,_lambda/this->_rho);
};
// Incremental minimization; the scaling of _z/_z2 mirrors
// QuadraticSurrogate::minimize_incremental_surrogate for each strategy.
virtual void minimize_incremental_surrogate(Vector<T>& output) {
const int n = this->_function->n();
_prox->linearize(output);
if (this->_strategy <= 2 || this->_strategy==4) {
if (_prox->id() == RIDGE) {
// Closed form: ridge prox is a simple rescaling.
output.add_scal(this->_z,T(1.0)/(this->_scalL*this->_rho+n*_lambda),0);
} else if (_prox->id() == L1) {
// Closed form: soft-thresholding with rescaling.
this->_z.softThrsholdScal(output,n*_lambda,T(1.0)/(this->_scalL*this->_rho));
} else {
// Generic prox; _z3 reused as scratch space.
Vector<T>& tmp = this->_z3;
tmp.copy(this->_z);
tmp.scal(T(1.0)/(this->_scalL*this->_rho));
_prox->prox(tmp,output,n*_lambda/(this->_scalL*this->_rho));
}
} else {
if (_prox->id() == RIDGE) {
output.copy(this->_z);
const T s = T(1.0)/(this->_rho+n*_lambda/this->_scalL);
output.add_scal(this->_z2,s/this->_scalL,s);
} else if (_prox->id() == L1) {
output.copy(this->_z);
output.add(this->_z2,T(1.0)/this->_scalL);
output.softThrsholdScal(output,n*_lambda/(this->_scalL),T(1.0)/(this->_rho));
} else {
Vector<T>& tmp = this->_z3;
tmp.copy(this->_z);
tmp.add_scal(this->_z2,T(1.0)/(this->_rho*this->_scalL),T(1.0)/(this->_rho));
_prox->prox(tmp,output,n*_lambda/(this->_scalL*this->_rho));
}
}
};
void inline changeLambda(const T lambda) { _lambda=lambda;};
// NOTE(review): sets _scalL to 0 for non-RIDGE regularizers; later divisions by
// _scalL would then be ill-defined — presumably strategy 4 is only used with RIDGE.
inline void set_param_strong_convexity() { this->_scalL = _prox->id() == RIDGE ? this->_function->n()*this->_lambda/this->_rho: 0; };
private:
// Non-copyable.
explicit ProximalSurrogate<T,U>(const ProximalSurrogate<T,U>& dict);
ProximalSurrogate<T,U>& operator=(const ProximalSurrogate<T,U>& dict);
Regularizer<T>* _prox;
T _lambda;
};
/*template <typename T, typename U>
class LogRegulSurrogate : public QuadraticSurrogate<T,U> {
public:
LogRegulSurrogate(SmoothFunction<T,U>* function,
const T eps, const T lambda) :
QuadraticSurrogate<T,U>(function),
_eps(eps),
_lambda(lambda) { };
virtual T eval_function(const Vector<T>& input) {
T tmp=0;
for (int i=0; i<input.n(); ++i) tmp+= log_alt<T>(abs<T>(input[i])+_eps);
return this->_function->eval(input)+_lambda*tmp;
};
virtual void minimize_surrogate(Vector<T>& output) {
//_prox->prox(this->_z,output,_lambda/this->_rho);
};
virtual void minimize_incremental_surrogate(Vector<T>& output) {
const int n = this->_function->n();
if (this->_strategy <= 2 || this->_strategy==4) {
// Vector<T>& tmp = this->_z3;
// tmp.copy(this->_z);
// tmp.scal(T(1.0)/(this->_scalL*this->_rho));
// _prox->prox(tmp,output,n*_lambda/(this->_scalL*this->_rho));
} else {
// Vector<T>& tmp = this->_z3;
// tmp.copy(this->_z);
// tmp.add_scal(this->_z2,T(1.0)/(this->_rho*this->_scalL),T(1.0)/(this->_rho));
// _prox->prox(tmp,output,n*_lambda/(this->_scalL*this->_rho));
}
};
void inline changeLambda(const T lambda) { _lambda=lambda;};
private:
explicit LogRegulSurrogate<T,U>(const LogRegulSurrogate<T,U>& dict);
LogRegulSurrogate<T,U>& operator=(const LogRegulSurrogate<T,U>& dict);
T _eps;
T _lambda;
};*/
/// Generic stochastic (online) solver driving an OnlineSurrogate: at iteration
/// t it updates the surrogate with weight w(t) and takes its minimizer.
/// _logs layout: [0]=final cost, [1]=cost of averaged iterate, [2]=elapsed time.
template <typename T, typename U>
class StochasticSolver {
public:
StochasticSolver() { };
StochasticSolver(OnlineSurrogate<T,U>& surrogate, const
ParamSurrogate<T>& param) : _eta(param.eta), _t0(param.t0),
_minibatches(param.minibatches),
_weighting_mode(param.weighting_mode),
_surrogate(&surrogate) {
_logs.resize(3);
};
virtual void solve(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int iters = 0, const
bool auto_params = true, const int averaging_mode = 0, const bool
verbose = false, const bool evaluate = true);
virtual void getLogs(Vector<T>& logs) { logs.copy(_logs); };
virtual int n() const { return _surrogate->n(); };
private:
// Non-copyable.
explicit StochasticSolver<T,U>(const StochasticSolver<T,U>& dict);
StochasticSolver<T,U>& operator=(const StochasticSolver<T,U>& dict);
protected:
void auto_parameters(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int averaging_mode = 0);
// Derive _eta from _t0 so that the weight at t=1 equals 1 for each mode.
void t0_to_eta() {
switch (_weighting_mode) {
case 0 : _eta=_t0+1; break;
case 1 : _eta=sqr_alt<T>(_t0+1); break;
case 2 : _eta=power<T>(_t0+1,0.75); break;
default: break;
}
};
// Step-size schedule: eta/(t+t0)^q with q in {1, 2, 0.75}, or constant _eta.
T t_to_weight(const int t) const {
switch (_weighting_mode) {
case 0: return _eta/(static_cast<T>(t)+_t0);
case 1: return _eta/sqr_alt<T>(static_cast<T>(t)+_t0);
case 2: return _eta/power<T>(static_cast<T>(t)+_t0,0.75);
default: return t==1 ? T(1.0) : _eta;
}
};
virtual void subsample(const int newn) { _surrogate->subsample(newn); };
virtual void un_subsample() { _surrogate->un_subsample(); };
T _eta;
T _t0;
Vector<T> _logs;
int _minibatches;
int _weighting_mode;
OnlineSurrogate<T,U>* _surrogate;
};
/// Heuristic adjustment of the schedule parameter _t0 (and the derived _eta),
/// inspired by Bottou's determineta0: run short trial solves on a 1/20
/// subsample, halving t0 while the trial cost improves (or doubling it if the
/// very first halving makes things worse), then keep the best value found.
/// @param w0 starting point (unchanged)
/// @param w, wav scratch iterates, overwritten by the trial solves
/// @param averaging_mode currently unused: only _logs[0] is compared
template <typename T, typename U>
void StochasticSolver<T,U>::auto_parameters(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int averaging_mode) {
const int newn= this->n()/20;
// BUGFIX: cast BEFORE dividing. The original
// ceil(static_cast<T>(this->n()/(20*_minibatches))) performed integer
// division first, so ceil() was applied to an already-truncated value.
const int iters = static_cast<int>(ceil(static_cast<T>(this->n())/(20*_minibatches)));
/// inspired from bottou's determineta0 function
T factor = 0.5;
T lo_t0 = _t0;
t0_to_eta();
//const int ind_res=averaging_mode ? 1 : 0;
const int ind_res=0; // compare costs without averaging
this->subsample(newn);
this->solve(w0,w,w,iters,false,0,false);
T loCost = _logs[ind_res];
// try to reduce t0; switch to growing it if the first trial got worse
for (int t = 0; t<15; ++t)
{
T hi_t0 = lo_t0* factor;
if (hi_t0 < 1 || hi_t0 > 10000000) break; // keep t0 in a sane range
_t0 = hi_t0;
t0_to_eta();
this->solve(w0,w,w,iters,false,0,false);
T hiCost = _logs[ind_res];
if (hiCost > loCost && t==0) {
factor=2.0; // wrong direction: search upwards instead
} else {
if (hiCost >= loCost) break; // no further improvement
lo_t0=hi_t0;
loCost=hiCost;
}
}
_t0 = lo_t0;
t0_to_eta();
this->un_subsample();
}
/// Run `iters` stochastic surrogate iterations from w0, writing the final
/// iterate into w and (optionally) an averaged iterate into wav.
/// averaging_mode: 0 = none, 1 = geometric averaging, 2 = weighted averaging.
template <typename T, typename U>
void StochasticSolver<T,U>::solve(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int iters,
const bool auto_params, const int averaging_mode, const bool verbose, const bool evaluate) {
if (verbose && iters > 0) {
cout << "Standard Proximal Solver" << endl;
if (auto_params)
cout << "Automatic Parameters Adjustment" << endl;
}
Timer time;
time.start();
_logs[2]=0;
w.copy(w0);
if (averaging_mode) wav.copy(w0);
// Optionally tune t0/eta on a subsample before the real run.
if (auto_params && iters > 0) this->auto_parameters(w0,w,wav,averaging_mode);
_surrogate->initialize(w0);
T tmpweight=0;
for (int i = 1; i<= iters; ++i) {
const T weight = t_to_weight(i);
_surrogate->update_surrogate(w,weight);
_surrogate->minimize_surrogate(w);
//if (i < 5)
// _surrogate->print_aux();
// Maintain the averaged iterate, if requested.
switch (averaging_mode) {
case 1: wav.scal(T(1.0)-weight); wav.add(w,weight); break;
case 2: tmpweight+=weight; wav.scal(T(1.0)-weight/tmpweight); wav.add(w,weight/tmpweight); break;
default: break;
};
}
time.stop();
if (evaluate) {
_logs[0]=_surrogate->eval_function(w);
if (averaging_mode) _logs[1]=_surrogate->eval_function(wav);
}
_logs[2]=time.getElapsed();
if (verbose && evaluate) {
time.printElapsed();
cout << "Result without averaging after " << iters << " iterations, cost = " << this->_logs[0] << endl;
if (averaging_mode) cout << "Result with averaging after " << iters << " iterations, cost = " << this->_logs[1] << endl;
}
};
/// Specialized stochastic solver for smooth loss + L2 (ridge) regularization;
/// overrides solve() with a dedicated, lazily-rescaled SGD variant.
template <typename T, typename U>
class StochasticSmoothRidgeSolver : public StochasticSolver<T,U> {
public:
StochasticSmoothRidgeSolver(SmoothFunction<T,U>& function, const T lambda,
const ParamSurrogate<T>& param) : _function(&function), _lambda(lambda) {
this->_eta=param.eta;
this->_t0=param.t0;
this->_minibatches=param.minibatches;
this->_weighting_mode=param.weighting_mode;
this->_logs.resize(3);
};
virtual void solve(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int iters = 0, const
bool auto_params = true, const int averaging_mode = 0, const bool
verbose = false, const bool evaluate = true);
protected:
// Operate directly on the loss function (no surrogate object here).
virtual void subsample(const int newn) { _function->subsample(newn); };
virtual void un_subsample() { _function->un_subsample(); };
virtual int n() const { return _function->n(); };
private:
// Non-copyable.
explicit StochasticSmoothRidgeSolver<T,U>(const StochasticSmoothRidgeSolver<T,U>& dict);
StochasticSmoothRidgeSolver<T,U>& operator=(const StochasticSmoothRidgeSolver<T,U>& dict);
SmoothFunction<T,U>* _function;
T _lambda;
};
/// Dedicated L2 solver: SGD where the ridge shrinkage is applied implicitly by
/// tracking scale factors (alpha for w, beta/gamma for the average) instead of
/// rescaling the full vectors each step — the true iterate is alpha*w.
template <typename T,typename U>
void StochasticSmoothRidgeSolver<T,U>::solve(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int iters,
const bool auto_params, const int averaging_mode, const bool verbose, const bool evaluate) {
Timer time;
time.start();
if (verbose && iters > 0) {
cout << "Dedicated L2 Solver" << endl;
if (auto_params)
cout << "Automatic Parameters Adjustment" << endl;
}
this->_logs[2]=0;
w.copy(w0);
if (averaging_mode) wav.copy(w0);
if (auto_params && iters > 0) this->auto_parameters(w0,w,wav,averaging_mode);
T rho=_function->genericL();
T alpha = T(1.0); // lazy scale of w (true iterate is alpha*w)
T beta = T(1.0); // lazy scale of wav
T gamma = 0; // coupling coefficient between w and wav
bool first_averaging=true;
for (int t = 1; t<= iters; ++t) {
T weight=this->t_to_weight(t);
_function->choose_random_batch();
// Running estimate of the batch Lipschitz constant when it is not constant.
if (!_function->constantL()) rho = (T(1.0)-weight)*rho + weight*_function->sampleL();
const T one_minus_kappa=_lambda/(rho+_lambda);
T newalpha = alpha*(T(1.0)-weight*one_minus_kappa);
const T scal=-weight/((rho+_lambda)*newalpha);
// Averaging starts only once the step size has decayed below 0.5.
if (averaging_mode && weight < T(0.5)) {
if (first_averaging) {
first_averaging=false;
wav.copy(w);
wav.scal(alpha);
} else {
beta*=(1-weight);
gamma=(T(1.0)-weight)*gamma*alpha/newalpha;
const T scal2=-weight*gamma/((rho+_lambda)*beta);
gamma -= weight;
_function->add_scal_grad(w,alpha,scal,wav,scal2); // x <- x + scal * nabla f( alpha x)
}
} else {
_function->add_scal_grad(w,alpha,scal,wav); // x <- x + scal * nabla f( alpha x)
}
alpha=newalpha;
// Fold the scale factors back into the vectors before they underflow.
if (alpha < 1e-10) {
w.scal(alpha);
alpha=T(1.0);
}
if (beta < 1e-10) {
wav.scal(beta);
beta=T(1.0);
}
}
// Materialize the true iterates from the lazy scales.
w.scal(alpha);
if (averaging_mode) {
wav.scal(beta);
wav.add(w,-gamma);
}
time.stop();
if (evaluate) {
this->_logs[0]=_function->eval(w)+0.5*_lambda*w.nrm2sq();
if (averaging_mode) this->_logs[1]=_function->eval(wav)+0.5*_lambda*wav.nrm2sq();
}
this->_logs[2]=time.getElapsed();
if (verbose && evaluate) {
time.printElapsed();
cout << "Result without averaging after " << iters << " iterations, cost = " << this->_logs[0] << endl;
if (averaging_mode) cout << "Result with averaging after " << iters << " iterations, cost = " << this->_logs[1] << endl;
}
}
/// Specialized stochastic solver for smooth loss + L1 penalty on sparse data
/// (SpMatrix); overrides solve() with lazy per-coordinate soft-thresholding.
template <typename T>
class StochasticSmoothL1Solver : public StochasticSolver<T,SpMatrix<T> > {
public:
StochasticSmoothL1Solver(SmoothFunction<T,SpMatrix<T> >& function, const T lambda,
const ParamSurrogate<T>& param) : _function(&function), _lambda(lambda) {
this->_eta=param.eta;
this->_t0=param.t0;
this->_minibatches=param.minibatches;
this->_weighting_mode=param.weighting_mode;
this->_logs.resize(3);
};
virtual void solve(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int iters = 0, const
bool auto_params = true, const int averaging_mode = 0, const bool
verbose = false, const bool evaluate = true);
protected:
// Operate directly on the loss function (no surrogate object here).
virtual void subsample(const int newn) { _function->subsample(newn); };
virtual void un_subsample() { _function->un_subsample(); };
virtual int n() const { return _function->n(); };
private:
// Non-copyable.
explicit StochasticSmoothL1Solver<T>(const StochasticSmoothL1Solver<T>& dict);
StochasticSmoothL1Solver<T>& operator=(const StochasticSmoothL1Solver<T>& dict);
SmoothFunction<T, SpMatrix<T> >* _function;
T _lambda;
};
/// Dedicated L1-sparse solver: coordinates are updated lazily — each
/// coordinate stores (x = thresholded value, z = pre-threshold value,
/// s = iteration stamp of its last touch) in pr_t, and the cumulative
/// shrinkage between touches is replayed from the circular buffers
/// sumw (accumulated thresholds) and prod (accumulated step products).
template <typename T>
void StochasticSmoothL1Solver<T>::solve(const Vector<T>& w0, Vector<T>& w, Vector<T>& wav, const int iters,
const bool auto_params, const int averaging_mode, const bool verbose, const bool evaluate) {
Timer time;
time.start();
w.copy(w0);
this->_logs[2]=0;
if (verbose && iters > 0)
cout << "Dedicated L1-Sparse Solver" << endl;
if (auto_params && iters > 0) this->auto_parameters(w0,w,wav,averaging_mode);
const int n = _function->n();
const int p = _function->p();
const T rho=_function->genericL();
Vector<T> sumw(n);
sumw[0]=0;
Vector<T> prod(n);
prod[0]=T(1.0);
SpVector<T> col;
// Per-coordinate lazy state; Triplet is assumed POD so memset zeroing is valid.
Triplet<T,T,int>* pr_t = new Triplet<T,T,int>[p];
memset(pr_t,0,p*sizeof(Triplet<T,T,int>));
int counter=0;
const T lambda_d_rho=_lambda/rho;
int renorm2=0;
int forgetting_offset=-1;
int middle_offset=0;
for (int t = 1; t<= iters; ++t) {
const T weight=this->t_to_weight(t);
_function->choose_random_batch();
const T onemw=T(1.0)-weight;
const int next_counter=(counter + 1) % n;
// Extend the cumulative-threshold buffer; re-center it before overflow.
sumw[next_counter]= sumw[counter]+weight*lambda_d_rho;
if (sumw[next_counter] > T(1e50)) {
sumw.add(-sumw[next_counter]);
}
//const int prev_counter = (n + counter - 1) % n;
prod[next_counter] = (t==1) ? T(1.0) : prod[counter]*onemw;
// Renormalize the step-product buffer when it underflows or wraps around.
if (prod[next_counter] < T(1e-8) || forgetting_offset == next_counter) {
T scal=T(1.0)/prod[next_counter];
for (int i = middle_offset; i != next_counter; i = (i+1) %n)
prod[i] *= scal;
prod[next_counter]=T(1.0);
forgetting_offset=middle_offset;
middle_offset=next_counter;
renorm2++;
}
const T thrs1 = onemw*lambda_d_rho;
const T thrs2 = weight/rho;
_function->refData(col); // might be counter instead
T* v = col.rawX();
INTM * r = col.rawR();
const int L = static_cast<int>(col.L());
T s = 0;
// Bring every coordinate touched by this sample up to date and accumulate
// the inner product s = <w, col> needed for the gradient.
for (int i = 0; i< L; ++i) {
const int ind=static_cast<int>(r[i]);
T& prx = pr_t[ind].x;
T& prz = pr_t[ind].z;
const int& prs = pr_t[ind].s;
const T val =abs<T>(prx);
const bool negval = prx < 0;
const int sc = prs;
if (val) {
if (sc==counter) {
// Already current: only anticipate this step's threshold.
prz = negval ? prx - thrs1 : prx + thrs1;
s+=prx*v[i];
} else {
// Replay all shrinkage accumulated since the last touch.
const T val2 = val + sumw[sc];
const T new_val = val2 - sumw[counter];
T& xval=prx;
if (new_val > 0) {
xval = negval ? -new_val : new_val;
prz =new_val + thrs1;
s+=xval*v[i];
} else {
xval=0;
// test if result is zero.
// Binary search for the iteration at which the coordinate hit zero,
// to reconstruct the correct pre-threshold value z.
int up = counter + n;
int down = sc <= counter ? sc + n : sc;
bool proceed = true;
const int forget_offset = forgetting_offset <= counter ? forgetting_offset +n : forgetting_offset;
if (sc <= forget_offset) {
if (val2 <= sumw[forgetting_offset]) {
prz=0;
proceed = false;
} else {
down = forget_offset;
}
}
if (proceed) {
while ( up - down > 1) {
int current = (up+down)/2;
assert(current != up);
assert(current != down);
if (val2 <= sumw[current % n]) {
up=current; // always satisfied for up
} else {
down=current; // never satisfied for down
}
}
const T z_jm1= val2 - sumw[(n + up-1) % n] + lambda_d_rho;
prz = z_jm1 * (prod[next_counter]/prod[up % n]);
}
}
if (negval) prz=-prz;
}
} else {
// Zero coordinate: only its latent z decays with the step products.
const int down = sc <= counter ? sc + n : sc;
const int forget_offset = forgetting_offset <= counter ? forgetting_offset +n : forgetting_offset;
prz = down <= forget_offset ? 0 : prz*(prod[next_counter]/prod[sc]);
}
}
s = _function->scal_grad(s);
s *= thrs2;
/// gradient is s*col
/// all pr_x corresponding to non-zeros grad have been updated
/// pr_z is updated except for the gradient
counter = next_counter;
// Apply the gradient and soft-threshold the touched coordinates.
for (int i = 0; i< L; ++i) {
const int ind=static_cast<int>(r[i]);
T& prx = pr_t[ind].x;
T& prz = pr_t[ind].z;
int& prs = pr_t[ind].s;
prz -= s*v[i];
if (prz > lambda_d_rho) {
prx=prz-lambda_d_rho;
} else if (prz < -lambda_d_rho) {
prx=prz+lambda_d_rho;
} else {
prx=0;
}
prs = counter;
}
// Last iteration: flush the pending shrinkage of every stale coordinate.
if (t==iters) {
for (int i = 0; i<p; ++i) {
T& prx = pr_t[i].x;
const int& prs = pr_t[i].s;
if (prx && counter != prs) {
const T diff=sumw[counter] - sumw[prs];
if (prx > diff) {
prx -= diff;
} else if (prx < -diff) {
prx += diff;
} else {
prx =0;
}
}
}
}
}
for (int i = 0; i<p; ++i) {
w[i]=pr_t[i].x;
}
delete[](pr_t);
time.stop();
if (evaluate) {
this->_logs[0]=_function->eval(w)+_lambda*w.asum();
this->_logs[1]=0;
}
this->_logs[2]=time.getElapsed();
if (verbose && evaluate) {
time.printElapsed();
cout << "Result without averaging after " << iters << " iterations, cost = " << this->_logs[0] << endl;
}
}
template <typename T, typename U>
void stochasticProximal(const Vector<T>& y, const U& X, const Vector<T>& w0,
Vector<T>& w, Vector<T>& wav, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const T lambda, Vector<T>& logs) {
SmoothFunction<T, U >* function;
switch (paramprox.loss) {
case LOG: function = new LogisticFunction<T, U>(X,y,param.normalized,param.minibatches,param.random); break;
case SQUARE: function = new SquareFunction<T, U>(X,y,param.normalized,param.minibatches,param.random); break;
default: function=NULL; cerr << "Unknown loss function" << endl; return;
}
if (paramprox.regul==RIDGE && param.optimized_solver) {
StochasticSmoothRidgeSolver<T, U> solver(*function,lambda,param);
solver.solve(w0,w,wav,param.iters,param.determineEta,param.averaging_mode,param.verbose);
solver.getLogs(logs);
} else {
Regularizer<T,Vector<T> >* regul = setRegularizerVectors<T>(paramprox);
ProximalSurrogate<T, U> surrogate(function,regul,lambda);
StochasticSolver<T, U> solver(surrogate,param);
solver.solve(w0,w,wav,param.iters,param.determineEta,param.averaging_mode,param.verbose);
solver.getLogs(logs);
delete(regul);
}
delete(function);
};
template <typename T>
void stochasticProximalSparse(const Vector<T>& y, const SpMatrix<T>& X, const Vector<T>& w0,
Vector<T>& w, Vector<T>& wav, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const T lambda, Vector<T>& logs) {
SmoothFunction<T, SpMatrix<T> >* function;
switch (paramprox.loss) {
case LOG: function = new LogisticFunction<T, SpMatrix<T> >(X,y,param.normalized,param.minibatches,param.random); break;
case SQUARE: function = new SquareFunction<T, SpMatrix<T> >(X,y,param.normalized,param.minibatches,param.random); break;
default: function=NULL; cerr << "Unknown loss function" << endl; return;
}
if (paramprox.regul==RIDGE && param.optimized_solver) {
StochasticSmoothRidgeSolver<T, SpMatrix<T> > solver(*function,lambda,param);
solver.solve(w0,w,wav,param.iters,param.determineEta,param.averaging_mode,param.verbose);
solver.getLogs(logs);
} else if (paramprox.regul==L1 && param.averaging_mode == 0 && param.optimized_solver) {
StochasticSmoothL1Solver<T> solver(*function,lambda,param);
solver.solve(w0,w,wav,param.iters,param.determineEta,param.averaging_mode,param.verbose);
solver.getLogs(logs);
} else {
Regularizer<T,Vector<T> >* regul = setRegularizerVectors<T>(paramprox);
ProximalSurrogate<T, SpMatrix<T> > surrogate(function,regul,lambda);
StochasticSolver<T, SpMatrix<T> > solver(surrogate,param);
solver.solve(w0,w,wav,param.iters,param.determineEta,param.averaging_mode,param.verbose);
solver.getLogs(logs);
delete(regul);
}
delete(function);
};
template <typename T, typename U>
void stochasticProximal(const Vector<T>& y, const U& X, const Matrix<T>& w0M,
Matrix<T>& wM, Matrix<T>& wavM, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const Vector<T>& lambdaV, Matrix<T>& logsM) {
const int num_lambdas=static_cast<int>(lambdaV.n());
int i;
#pragma omp parallel for private(i)
for (i = 0; i<num_lambdas; ++i) {
Vector<T> w0;
Vector<T> w;
Vector<T> wav;
Vector<T> logs;
w0M.refCol(i,w0);
wM.refCol(i,w);
wavM.refCol(i,wav);
logsM.refCol(i,logs);
stochasticProximal(y,X,w0,w,wav,paramprox,param,lambdaV[i],logs);
}
};
template <typename T>
void stochasticProximalSparse(const Vector<T>& y, const SpMatrix<T>& X, const Matrix<T>& w0M,
Matrix<T>& wM, Matrix<T>& wavM, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const Vector<T>& lambdaV, Matrix<T>& logsM) {
const int num_lambdas=static_cast<int>(lambdaV.n());
int i;
#pragma omp parallel for private(i)
for (i = 0; i<num_lambdas; ++i) {
Vector<T> w0;
Vector<T> w;
Vector<T> wav;
Vector<T> logs;
w0M.refCol(i,w0);
wM.refCol(i,w);
wavM.refCol(i,wav);
logsM.refCol(i,logs);
stochasticProximalSparse(y,X,w0,w,wav,paramprox,param,lambdaV[i],logs);
}
};
/// Driver for incremental (MISO-type) surrogates: sweeps over all batches each
/// epoch, updating and minimizing the incremental surrogate.
/// _logs layout: [0]=final cost, [2]=elapsed time.
template <typename T, typename U>
class IncrementalSolver {
public:
IncrementalSolver(IncrementalSurrogate<T,U>& surrogate, const
ParamSurrogate<T>& param) :
_surrogate(&surrogate), _minibatches(param.minibatches) {
_logs.resize(3);
};
~IncrementalSolver() { };
virtual void solve(const Vector<T>& w0, Vector<T>& w, const int epochs = 0, const bool
verbose = false, const bool evaluate = true, const int strategy = 3,
const bool warm_restart = false);
virtual void getLogs(Vector<T>& logs) { logs.copy(_logs); };
protected:
// Heuristic search for the surrogate scale parameter on a subsample.
void auto_parameters(const Vector<T>& w0, Vector<T>& w, const int strategy = 3);
private:
// Non-copyable.
explicit IncrementalSolver<T,U>(const IncrementalSolver<T,U>& dict);
IncrementalSolver<T,U>& operator=(const IncrementalSolver<T,U>& dict);
virtual void subsample(const int newn) { _surrogate->subsample(newn); };
virtual void un_subsample() { _surrogate->un_subsample(); };
Vector<T> _logs;
IncrementalSurrogate<T,U>* _surrogate;
int _minibatches;
};
/// Run `epochs` incremental epochs from w0 (or continue from previous state
/// when warm_restart). Strategy 3 additionally rescales the surrogate
/// parameter after each epoch based on the accumulated diff diagnostics.
template <typename T, typename U>
void IncrementalSolver<T,U>::solve(const Vector<T>& w0, Vector<T>& w, const int epochs,
const bool verbose, const bool
evaluate, const int strategy, const bool warm_restart) {
if (verbose && epochs > 0 && !warm_restart) {
cout << "Standard Incremental Solver" << endl;
}
/// strategy 0: do not change L
/// strategy 1: try to adjust L
Timer time;
time.start();
_logs.set(0);
if (strategy >= 1 && strategy < 4 && !warm_restart) auto_parameters(w0,w,strategy);
w.copy(w0);
if (!warm_restart)
_surrogate->initialize_incremental(w0,strategy);
if (strategy == 4) _surrogate->set_param_strong_convexity();
const int num_batches = _surrogate->num_batches();
if (strategy == 3) _surrogate->reset_diff();
if (epochs > 0) {
/// first epoch: deterministic batch order on a cold start, random on warm restart
_surrogate->setRandom(warm_restart);
_surrogate->setFirstPass(!warm_restart);
for (int j = 0; j< num_batches; ++j) {
_surrogate->update_incremental_surrogate(w);
_surrogate->minimize_incremental_surrogate(w);
}
// On a warm restart the first epoch already yields valid diagnostics:
// adjust the parameter right away.
if (strategy == 3 && warm_restart) {
if ((_surrogate->get_diff()) < 0) {
T fact = (_surrogate->get_scal_diff());
_surrogate->set_param(fact*_surrogate->get_param());
}
_surrogate->reset_diff();
}
// NOTE(review): redundant when the branch above ran (diff already reset),
// but needed on a cold start to discard first-pass statistics.
if (strategy == 3) _surrogate->reset_diff();
/// classical epochs
_surrogate->setRandom(true);
_surrogate->setFirstPass(false);
for (int i = 1; i<epochs; ++i) {
for (int j = 0; j< num_batches; ++j) {
_surrogate->update_incremental_surrogate(w);
_surrogate->minimize_incremental_surrogate(w);
}
// Strategy 3: rescale the surrogate parameter from the epoch diagnostics.
if (strategy == 3) {
if ((_surrogate->get_diff()) < 0) {
T fact = (_surrogate->get_scal_diff());
_surrogate->set_param(fact*_surrogate->get_param());
}
_surrogate->reset_diff();
}
}
}
time.stop();
_logs[2]=time.getElapsed();
if (evaluate)
_logs[0]=_surrogate->eval_function(w);
if (verbose && evaluate) {
time.printElapsed();
//timer2.printElapsed();
//timer3.printElapsed();
cout << "Result after " << epochs << " epochs, cost = " << this->_logs[0] << endl;
}
};
/// Heuristic search for the surrogate scale parameter: run one trial epoch on
/// a 1/20 subsample with strategy 0, halving (or doubling after a bad first
/// trial) the parameter while the trial cost improves.
template <typename T, typename U>
void IncrementalSolver<T,U>::auto_parameters(const Vector<T>& w0, Vector<T>& w, const int strategy) {
const int newn= _surrogate->n()/20;
/// inspired from bottou's determineta0 function
T factor = 0.5;
T lo_param = _surrogate->get_param();
_surrogate->subsample(newn);
this->solve(w0,w,1,false,true,0);
T loCost = _logs[0];
//cerr << _logs[0] << " ";
// try to reduce
for (int t = 0; t<15; ++t)
{
T hi_param = lo_param* factor;
if (hi_param < 1e-8 || hi_param > 1) break; // keep the parameter in a sane range
_surrogate->set_param(hi_param);
this->solve(w0,w,1,false,true,0);
T hiCost = _logs[0];
// cerr << _logs[0] << " ";
if (hiCost > loCost && t==0) {
factor=2.0; // wrong direction: search upwards instead
} else {
if (hiCost >= loCost) break; // no further improvement
lo_param=hi_param;
loCost=hiCost;
}
}
// cerr << endl;
// Strategies >= 2 start from a more aggressive (smaller) parameter.
_surrogate->set_param(strategy >= 2 ? lo_param/20 : lo_param);
// cerr << "param: " << lo_param << endl;
_surrogate->un_subsample();
};
/// Dedicated incremental solver for smooth loss + L2: maintains one dual-like
/// coefficient per sample (alphas) and keeps w = w0-like primal consistent via
/// rank-one updates (SDCA-style scheme — presumably; verify against the paper).
/// @param init when false, reuse alphas from a previous call (warm restart)
template <typename T, typename U>
void incrementalSmoothRidge(SmoothFunction<T,U>& function, const Vector<T>& w0,
Vector<T>& w, Vector<T>& alphas, const int epochs, const T lambda,
Vector<T>& logs, const bool init = true, const bool evaluate = true) {
const int n = function.n();
Timer time;
time.start();
logs.set(0);
if (init) {
alphas.resize(n);
alphas.setZeros();
}
w.copy(w0);
const T scal = T(1.0)/(n*lambda);
for (int i = 0; i<epochs; ++i) {
typename U::col col;
// First epoch of a cold start sweeps deterministically; afterwards random.
function.setRandom(i >= 1 || !init);
for (int j = 0; j<n; ++j) {
function.choose_random_batch();
const int ind = function.get_batch();
function.refData(col);
const T alphaold=alphas[ind];
const T s = w.dot(col);
const T y = function.getY();
// Replace this sample's coefficient and patch w with the difference.
alphas[ind]=function.gradient_simple(y,s);
w.add(col,(alphaold-alphas[ind])*scal);
}
}
time.stop();
logs[2]=time.getElapsed();
if (evaluate)
logs[0]=function.eval(w)+0.5*lambda*w.nrm2sq();
};
template <typename T, typename U>
void incrementalProximal(const Vector<T>& y, const U& X, const Vector<T>& w0,
Vector<T>& w, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const T lambda, Vector<T>& logs) {
SmoothFunction<T, U >* function;
switch (paramprox.loss) {
case LOG: function = new LogisticFunction<T, U>(X,y,param.normalized,param.minibatches,param.random); break;
case SQUARE: function = new SquareFunction<T, U>(X,y,param.normalized,param.minibatches,param.random); break;
default: function=NULL; cerr << "Unknown loss function" << endl; return;
}
Regularizer<T,Vector<T> >* regul = setRegularizerVectors<T>(paramprox);
if (regul->id()==RIDGE && param.strategy==4) {
Vector<T> alphas;
incrementalSmoothRidge(*function,w0,w,alphas,param.epochs,lambda,logs,true,true);
} else {
ProximalSurrogate<T, U> surrogate(function,regul,lambda);
IncrementalSolver<T, U> solver(surrogate,param);
solver.solve(w0,w,param.epochs,param.verbose,true,param.strategy);
solver.getLogs(logs);
}
delete(regul);
delete(function);
};
/// Regularization-path variant of incrementalProximal: one independent run per
/// entry of lambdaV; column i of w0M/wM/logsM holds the start point and
/// outputs for lambdaV[i]. Runs are parallelized with OpenMP.
template <typename T, typename U>
void incrementalProximal(const Vector<T>& y, const U& X, const Matrix<T>& w0M,
Matrix<T>& wM, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const Vector<T>& lambdaV, Matrix<T>& logsM) {
if (param.verbose) {
cout << "Incremental proximal algorithm" << endl;
cout << "heuristic mode " << param.strategy << endl;
if (param.strategy==2)
cout << " WARNING, strategy 2 is unsafe " << endl;
}
// FIX: explicit cast, consistent with the stochasticProximal path variant —
// lambdaV.n() is wider than int, so the conversion should be explicit.
const int num_lambdas=static_cast<int>(lambdaV.n());
int i;
#pragma omp parallel for private(i)
for (i = 0; i<num_lambdas; ++i) {
Vector<T> w0;
Vector<T> w;
Vector<T> logs;
w0M.refCol(i,w0);
wM.refCol(i,w);
logsM.refCol(i,logs);
incrementalProximal(y,X,w0,w,paramprox,param,lambdaV[i],logs);
}
};
/// Sequential (path-following) variant: solve the lambdaV path in order,
/// warm-starting each problem from the previous solution (w0M column 0 seeds
/// the first problem only). Cannot be parallelized since runs are chained.
/// NOTE(review): unlike the parallel variant, the banner prints here are
/// unconditional (not gated on param.verbose) — confirm whether intended.
template <typename T, typename U>
void incrementalProximalSeq(const Vector<T>& y, const U& X, const Matrix<T>& w0M,
Matrix<T>& wM, const ParamFISTA<T>& paramprox, const ParamSurrogate<T>& param,
const Vector<T>& lambdaV, Matrix<T>& logsM) {
// FIX: explicit cast, consistent with the other path variants — lambdaV.n()
// is wider than int, so the conversion should be explicit.
const int num_lambdas=static_cast<int>(lambdaV.n());
// Instantiate the smooth data-fitting term.
SmoothFunction<T, U >* function;
switch (paramprox.loss) {
case LOG: function = new LogisticFunction<T, U>(X,y,param.normalized,param.minibatches,param.random); break;
case SQUARE: function = new SquareFunction<T, U>(X,y,param.normalized,param.minibatches,param.random); break;
default: function=NULL; cerr << "Unknown loss function" << endl; return;
}
Regularizer<T,Vector<T> >* regul = setRegularizerVectors<T>(paramprox);
// One surrogate/solver pair reused across the path; only lambda changes.
ProximalSurrogate<T, U> surrogate(function,regul,lambdaV[0]);
IncrementalSolver<T, U> solver(surrogate,param);
cout << "path-following incremental algorithm" << endl;
cout << "heuristic mode " << param.strategy << endl;
if (param.strategy==2)
cout << " WARNING, strategy 2 is unsafe " << endl;
Vector<T> alphas; // dual coefficients reused across the ridge fast path
for (int i = 0; i<num_lambdas; ++i) {
Vector<T> w0;
Vector<T> w;
Vector<T> logs;
// Warm start: first problem from w0M, later ones from the previous solution.
if (i==0) {
w0M.refCol(i,w0);
} else {
wM.refCol(i-1,w0);
}
wM.refCol(i,w);
logsM.refCol(i,logs);
if (regul->id()==RIDGE && param.strategy==4) {
incrementalSmoothRidge(*function,w0,w,alphas,param.epochs,lambdaV[i],logs,i==0,true);
} else {
surrogate.changeLambda(lambdaV[i]);
//solver.solve(w0,w,param.epochs,param.verbose,true,param.strategy,false);
solver.solve(w0,w,param.epochs,param.verbose,true,param.strategy,i > 0);
solver.getLogs(logs);
}
}
delete(regul);
delete(function);
};
#endif
|
snoop.c | /*
* compute the duplex structure of two RNA strands,
* allowing only inter-strand base pairs.
* see cofold() for computing hybrid structures without
* restriction.
*
* Ivo Hofacker
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/strings.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/snofold.h"
#include "ViennaRNA/pair_mat.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/snoop.h"
#include "ViennaRNA/plotting/probabilities.h"
#include "ViennaRNA/plotting/structures.h"
/* #include "ViennaRNA/fold.h" */
#include "ViennaRNA/duplex.h"
#include "ViennaRNA/loops/all.h"
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymetry penalty */
PRIVATE void
encode_seqs(const char *s1,
const char *s2);
PRIVATE short *
encode_seq(const char *seq);
PRIVATE void
find_max_snoop(const char *s1,
const char *s2,
const int max,
const int alignment_length,
const int *position,
const int delta,
const int distance,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshTE,
const int threshSE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const char *name,
const int fullStemEnergy);
PRIVATE void
find_max_snoop_XS(const char *s1,
const char *s2,
const int **access_s1,
const int max,
const int alignment_length,
const int *position,
const int *position_j,
const int delta,
const int distance,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshTE,
const int threshSE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const char *name,
const int fullStemEnergy);
PRIVATE char *
alisnoop_backtrack(int i,
int j,
const char **s2,
int *Duplex_El,
int *Duplex_Er,
int *Loop_E,
int *Loop_D,
int *u,
int *pscd,
int *psct,
int *pscg,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const short **S1,
const short **S2);
PRIVATE char *
snoop_backtrack(int i,
int j,
const char *s2,
int *Duplex_El,
int *Duplex_Er,
int *Loop_E,
int *Loop_D,
int *u,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2);
PRIVATE char *
snoop_backtrack_XS(int i,
int j,
const char *s2,
int *Duplex_El,
int *Duplex_Er,
int *Loop_E,
int *Loop_D,
int *u,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2);
PRIVATE int
compare(const void *sub1,
const void *sub2);
PRIVATE int
covscore(const int *types,
int n_seq);
PRIVATE short *
aliencode_seq(const char *sequence);
PUBLIC int snoop_subopt_sorted = 0; /* from subopt.c, default 0 */
/*@unused@*/
#define MAXLOOP_L 3
#define MIN2(A, B) ((A) < (B) ? (A) : (B))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))
#define ASS 1
PRIVATE vrna_param_t *P = NULL;
PRIVATE int **c = NULL; /* energy array, given that i-j pair */
PRIVATE int **r = NULL;
PRIVATE int **lc = NULL; /* energy array, given that i-j pair */
PRIVATE int **lr = NULL;
PRIVATE int **c_fill = NULL;
PRIVATE int **r_fill = NULL;
PRIVATE int **lpair = NULL;
PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL;
PRIVATE short *S1_fill = NULL, *SS1_fill = NULL, *S2_fill = NULL, *SS2_fill = NULL;
PRIVATE int n1, n2; /* sequence lengths */
extern int cut_point;
PRIVATE int delay_free = 0;
/*--------------------------------------------------------------------------*/
/*
 * alisnoopfold() -- comparative (alignment based) MFE search for a
 * snoRNA/target duplex.  s1/s2 are NULL-terminated arrays of aligned
 * sequences (one alignment per strand); the remaining parameters are
 * length/position constraints on the snoRNA stem and duplex geometry.
 *
 * Fills the file-scope DP matrices c[][] (duplex, i pairs j) and r[][]
 * (duplex extended through the snoRNA internal stem) over all sequences
 * of the alignment, applying the covariance score from covscore() as a
 * bonus, then backtracks the minimum-energy interaction.
 *
 * Returns a snoopT with energy = INF and structure = NULL when no hit
 * below 0 is found under the constraints.  When the caller set
 * delay_free (see alisnoop_subopt()) the DP matrices are intentionally
 * NOT freed here so the caller can reuse them.
 */
snoopT
alisnoopfold(const char **s1,
const char **s2,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2)
{
int s, n_seq;
int i, j, E, l1, Emin = INF, i_min = 0, j_min = 0;
char *struc;
snoopT mfe;
int *indx;
int *mLoop;
int *cLoop;
folden **foldlist;
folden **foldlist_XS;
int Duplex_El, Duplex_Er, pscd, psct, pscg;
int Loop_D;
int u;
int Loop_E;
short **Sali1, **Sali2;
int *type, *type2, *type3;
vrna_md_t md;
Duplex_El = 0;
Duplex_Er = 0;
Loop_E = 0;
Loop_D = 0;
pscd = 0;
psct = 0;
pscg = 0;
/* pull in the snoRNA folding arrays precomputed by snofold() */
snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
n1 = (int)strlen(s1[0]);
n2 = (int)strlen(s2[0]);
/* count sequences in both alignments; they must agree */
for (s = 0; s1[s] != NULL; s++) ;
n_seq = s;
for (s = 0; s2[s] != NULL; s++) ;
if (n_seq != s)
vrna_message_error("unequal number of sequences in aliduplexfold()\n");
set_model_details(&md);
/* (re)build energy parameters if temperature changed or first call */
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
snoupdate_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
/* allocate and INF-initialize the two DP matrices */
c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
r = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
for (i = 0; i <= n1; i++) {
c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
r[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
for (j = n2; j > -1; j--) {
c[i][j] = INF;
r[i][j] = INF;
}
}
Sali1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
Sali2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
for (s = 0; s < n_seq; s++) {
if ((int)strlen(s1[s]) != n1)
vrna_message_error("uneqal seqence lengths");
if ((int)strlen(s2[s]) != n2)
vrna_message_error("uneqal seqence lengths");
Sali1[s] = aliencode_seq(s1[s]);
Sali2[s] = aliencode_seq(s2[s]);
}
type = (int *)vrna_alloc(n_seq * sizeof(int));
type2 = (int *)vrna_alloc(n_seq * sizeof(int));
type3 = (int *)vrna_alloc(n_seq * sizeof(int));
/* encode_seqs(s1, s2); */
/* main DP fill; i skips the 5-nt N padding at both ends of s1 */
for (i = 6; i <= n1 - 5; i++) {
int U;
/* U == 1 iff position i-2 is a conserved U in every sequence
 * (encoded value 4) -- required for the box pairing below */
U = 0;
for (s = 0; s < n_seq; s++)
U += Sali1[s][i - 2];
U = (U == (n_seq) * 4 ? 1 : 0);
for (j = n2 - min_d2; j > min_d1; j--) {
int type4, k, l, psc, psc2, psc3;
for (s = 0; s < n_seq; s++)
type[s] = pair[Sali1[s][i]][Sali2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
c[i][j] = (psc >= MINPSCORE) ? (n_seq * P->DuplexInit) : INF;
if (psc < MINPSCORE)
continue;
if (/* pair[Sali1[i+1]][Sali2[j-1]] && */
U && j < max_s1 && j > min_s1 &&
j > n2 - max_s2 - max_half_stem &&
j < n2 - min_s2 - half_stem) {
/*constraint on s2 and i*/
/* try closing the snoRNA internal stem: r extends c through
 * the loop energies stored in the folden list for column j+1 */
folden *temp;
temp = foldlist[j + 1];
while (temp->next) {
int k = temp->k;
for (s = 0; s < n_seq; s++) {
type2[s] = pair[Sali1[s][i - 3]][Sali2[s][k + 1]];
type3[s] = pair[Sali1[s][i - 4]][Sali2[s][k + 1]];
}
psc2 = covscore(type2, n_seq);
psc3 = covscore(type3, n_seq);
if (psc2 > MINPSCORE)
r[i][j] = MIN2(r[i][j], c[i - 3][k + 1] + temp->energy);
if (psc3 > MINPSCORE)
r[i][j] = MIN2(r[i][j], c[i - 4][k + 1] + temp->energy);
temp = temp->next;
}
}
/* dangle 5'SIDE relative to the mRNA */
for (s = 0; s < n_seq; s++)
c[i][j] += vrna_E_ext_stem(type[s], Sali1[s][i - 1], Sali2[s][j + 1], P);
/* interior-loop extension, loop size bounded by MAXLOOP_L and ASS */
for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) {
for (l = j + 1; l <= n2; l++) {
if (i - k + l - j > 2 * MAXLOOP_L - 2)
break;
if (abs(i - k - l + j) >= ASS)
continue;
for (E = s = 0; s < n_seq; s++) {
type4 = pair[Sali1[s][k]][Sali2[s][l]];
if (type4 == 0)
type4 = 7;
E += E_IntLoop(i - k - 1, l - j - 1, type4, rtype[type[s]],
Sali1[s][k + 1], Sali2[s][l - 1], Sali1[s][i - 1], Sali2[s][j + 1], P);
}
c[i][j] = MIN2(c[i][j], c[k][l] + E);
r[i][j] = MIN2(r[i][j], r[k][l] + E);
}
}
/* covariance bonus (psc is negative for good covariation) */
c[i][j] -= psc;
r[i][j] -= psc;
E = r[i][j];
for (s = 0; s < n_seq; s++)
E += vrna_E_ext_stem(rtype[type[s]], Sali2[s][j - 1], Sali1[s][i + 1], P);
/**
*** if (i<n1) E += P->dangle3[rtype[type[s]]][Sali1[s][i+1]];
*** if (j>1) E += P->dangle5[rtype[type[s]]][Sali2[s][j-1]];
*** if (type[s]>2) E += P->TerminalAU;
**/
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
/* no interaction below 0: clean up and signal failure with INF */
if (Emin > 0) {
printf("no target found under the constraints chosen\n");
for (i = 0; i <= n1; i++) {
free(r[i]);
free(c[i]);
}
free(c);
free(r);
for (s = 0; s < n_seq; s++) {
free(Sali1[s]);
free(Sali2[s]);
}
free(Sali1);
free(Sali2);
/* NOTE(review): S2/SS1/SS2 are file-scope leftovers from a previous
 * encode_seqs() call; free(NULL) is a no-op on first use */
free(S2);
free(SS1);
free(SS2);
free(type);
free(type2);
free(type3);
mfe.energy = INF;
mfe.structure = NULL;
return mfe;
}
/* backtrack the MFE hit; also accumulates the energy decomposition */
struc = alisnoop_backtrack(i_min, j_min, (const char **)s2,
&Duplex_El, &Duplex_Er, &Loop_E,
&Loop_D, &u, &pscd, &psct, &pscg,
penalty, threshloop, threshLE,
threshRE, threshDE, threshD,
half_stem, max_half_stem, min_s2,
max_s2, min_s1, max_s1, min_d1,
min_d2, (const short **)Sali1, (const short **)Sali2);
/* if (i_min<n1-5) i_min++; */
/* if (j_min>6 ) j_min--; */
l1 = strchr(struc, '&') - struc;
/* positions are reported relative to the unpadded sequences (-5) */
mfe.i = i_min - 5;
mfe.j = j_min - 5;
mfe.u = u - 5;
mfe.Duplex_Er = (float)Duplex_Er / 100;
mfe.Duplex_El = (float)Duplex_El / 100;
mfe.Loop_D = (float)Loop_D / 100;
mfe.Loop_E = (float)Loop_E / 100;
mfe.energy = (float)Emin / 100;
/* mfe.fullStemEnergy = (float) fullStemEnergy/100; */
mfe.pscd = pscd;
mfe.psct = psct;
mfe.structure = struc;
for (s = 0; s < n_seq; s++) {
free(Sali1[s]);
free(Sali2[s]);
}
free(Sali1);
free(Sali2);
free(type);
free(type2);
free(type3);
/* keep c/r alive when alisnoop_subopt() wants to enumerate them */
if (!delay_free) {
for (i = 0; i <= n1; i++) {
free(r[i]);
free(c[i]);
}
free(c);
free(r);
free(S2);
free(SS1);
free(SS2);
}
return mfe;
}
/*
 * alisnoop_subopt() -- enumerate suboptimal snoRNA/target interactions
 * for an alignment.  First runs alisnoopfold() with delay_free set so
 * the global DP matrices c/r survive, then scans r[][] for all (i,j)
 * within `delta` of the optimum (capped by threshTE), suppresses hits
 * dominated by a better neighbour within window w, backtracks each
 * survivor and filters it against the energy-component thresholds.
 *
 * Returns a vrna_alloc'd array of snoopT terminated by a record with
 * structure == NULL, or NULL when alisnoopfold() found nothing.
 * Caller owns the array and each .structure string.
 */
PUBLIC snoopT *
alisnoop_subopt(const char **s1,
const char **s2,
int delta,
int w,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshTE,
const int threshSE,
const int threshD,
const int distance,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2)
{
short **Sali1, **Sali2;
/* printf("%d %d\n", min_s2, max_s2); */
int i, j, s, n_seq, n1, n2, E, n_subopt = 0, n_max;
char *struc;
snoopT mfe;
snoopT *subopt;
int thresh;
int *type;
int Duplex_El, Duplex_Er, Loop_E, pscd, psct, pscg;
int Loop_D;
Duplex_El = 0;
Duplex_Er = 0;
Loop_E = 0;
Loop_D = 0;
pscd = 0;
psct = 0;
pscg = 0;
int u;
u = 0;
n_max = 16;
subopt = (snoopT *)vrna_alloc(n_max * sizeof(snoopT));
/* keep the c/r matrices filled by alisnoopfold() alive for the scan */
delay_free = 1;
mfe = alisnoopfold(s1, s2, penalty, threshloop, threshLE, threshRE, threshDE, threshD,
half_stem, max_half_stem,
min_s2, max_s2, min_s1, max_s1, min_d1, min_d2);
if (mfe.energy > 0) {
free(subopt);
delay_free = 0;
return NULL;
}
/* acceptance threshold: mfe component sum (dacaJ +410 offset) + delta,
 * but never above the user threshold threshTE */
thresh = MIN2((int)((mfe.Duplex_Er + mfe.Duplex_El + mfe.Loop_E) * 100 + 0.1 + 410) + delta,
threshTE);
/* subopt[n_subopt++]=mfe; */
free(mfe.structure);
n1 = (int)strlen(s1[0]);
n2 = (int)strlen(s2[0]);
for (s = 0; s1[s] != NULL; s++) ;
n_seq = s;
Sali1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
Sali2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
for (s = 0; s < n_seq; s++) {
if ((int)strlen(s1[s]) != n1)
vrna_message_error("uneqal seqence lengths");
if ((int)strlen(s2[s]) != n2)
vrna_message_error("uneqal seqence lengths");
Sali1[s] = aliencode_seq(s1[s]);
Sali2[s] = aliencode_seq(s2[s]);
}
Sali1[n_seq] = NULL;
Sali2[n_seq] = NULL;
type = (int *)vrna_alloc(n_seq * sizeof(int));
/* scan every right end (i,j) of the r matrix */
for (i = n1; i > 1; i--) {
for (j = 1; j <= n2; j++) {
int ii, jj, Ed, psc, skip;
for (s = 0; s < n_seq; s++)
type[s] = pair[Sali2[s][j]][Sali1[s][i]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
if (psc < MINPSCORE)
continue;
E = Ed = r[i][j];
for (s = 0; s < n_seq; s++) {
/* if (i<n1-5) Ed += P->dangle3[type[s]][Sali1[s][i+1]]; */
/* if (j>6) Ed += P->dangle5[type[s]][Sali2[s][j-1]]; */
if (type[s] > 2)
Ed += P->TerminalAU;
}
if (Ed > thresh)
continue;
/* too keep output small, remove hits that are dominated by a
* better one close (w) by. For simplicity we do test without
* adding dangles, which is slightly inaccurate.
*/
/* NOTE(review): the caller-supplied window w is overridden here */
w = 1;
for (skip = 0, ii = MAX2(i - w, 1); (ii <= MIN2(i + w, n1)) && type; ii++) {
for (jj = MAX2(j - w, 1); jj <= MIN2(j + w, n2); jj++)
if (r[ii][jj] < E) {
skip = 1;
break;
}
}
if (skip)
continue;
psct = 0;
pscg = 0;
struc = alisnoop_backtrack(i,
j,
s2,
&Duplex_El,
&Duplex_Er,
&Loop_E,
&Loop_D,
&u,
&pscd,
&psct,
&pscg,
penalty,
threshloop,
threshLE,
threshRE,
threshDE,
threshD,
half_stem,
max_half_stem,
min_s2,
max_s2,
min_s1,
max_s1,
min_d1,
min_d2,
(const short int **)Sali1,
(const int short **)Sali2);
/* reject hits violating any per-component energy threshold */
if (Duplex_Er > threshRE || Duplex_El > threshLE || Loop_D > threshD ||
(Duplex_Er + Duplex_El) > threshDE ||
(Duplex_Er + Duplex_El + Loop_E) > threshTE ||
(Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) > threshSE) {
/* printf(" Duplex_Er %d threshRE %d Duplex_El %d threshLE %d \n" */
/* " Duplex_Er + Duplex_El %d threshDE %d \n" */
/* " Duplex_Er + Duplex_El + Loop_E %d threshTE %d \n" */
/* " Duplex_Er + Duplex_El + Loop_E + Loop_D %d threshSE %d \n", */
/* Duplex_Er , threshRE , Duplex_El ,threshLE, */
/* Duplex_Er + Duplex_El, threshDE, */
/* Duplex_Er + Duplex_El+ Loop_E , threshTE, */
/* Duplex_Er + Duplex_El+ Loop_E + Loop_D, threshSE); */
Duplex_Er = 0;
Duplex_El = 0;
Loop_E = 0;
Loop_D = 0;
u = 0,
free(struc);
continue;
}
/* grow the result array; +1 keeps room for the terminator record */
if (n_subopt + 1 >= n_max) {
n_max *= 2;
subopt = (snoopT *)vrna_realloc(subopt, n_max * sizeof(snoopT));
}
subopt[n_subopt].i = i - 5;
subopt[n_subopt].j = j - 5;
subopt[n_subopt].u = u - 5;
subopt[n_subopt].Duplex_Er = Duplex_Er * 0.01;
subopt[n_subopt].Duplex_El = Duplex_El * 0.01;
subopt[n_subopt].Loop_E = Loop_E * 0.01;
subopt[n_subopt].Loop_D = Loop_D * 0.01;
subopt[n_subopt].energy = (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) * 0.01;
subopt[n_subopt].pscd = pscd * 0.01;
subopt[n_subopt].psct = -psct * 0.01;
subopt[n_subopt++].structure = struc;
/* i=u; */
/* reset accumulators for the next backtrack */
Duplex_Er = 0;
Duplex_El = 0;
Loop_E = 0;
Loop_D = 0;
u = 0;
pscd = 0;
psct = 0;
}
}
/* now release the DP matrices kept alive by delay_free */
for (i = 0; i <= n1; i++) {
free(c[i]);
free(r[i]);
}
free(c);
free(r);
for (s = 0; s < n_seq; s++) {
free(Sali1[s]);
free(Sali2[s]);
}
free(Sali1);
free(Sali2);
free(type);
if (snoop_subopt_sorted)
qsort(subopt, n_subopt, sizeof(snoopT), compare);
/* sentinel record marks the end of the list for the caller */
subopt[n_subopt].i = 0;
subopt[n_subopt].j = 0;
subopt[n_subopt].structure = NULL;
return subopt;
}
/*
 * alisnoop_backtrack() -- reconstruct the structure of an alignment hit.
 *
 * Backtracks from cell (i,j) going backwards through s1 and forwards
 * through s2, first in the r matrix (duplex + snoRNA stem), then --
 * once the stem has been crossed -- in the c matrix (plain duplex).
 * Returns a vrna_alloc'd bracket string with '&' separating the two
 * strands; the caller owns it.  The out-parameters accumulate the
 * energy decomposition (Duplex_El/Er, Loop_E, Loop_D), the position of
 * the conserved U (*u) and the covariance contributions (pscd/psct;
 * pscg is currently not written here).
 *
 * FIX: struc_loop (the snoRNA-loop structure produced by
 * alisnobacktrack_fold_from_pair()) was read and freed even when no
 * stem branch ever assigned it, i.e. an uninitialized pointer was
 * dereferenced.  It is now initialized to NULL and guarded at the
 * single read site; free(NULL) is a no-op.
 */
PRIVATE char *
alisnoop_backtrack(int i,
int j,
const char **snoseq,
int *Duplex_El,
int *Duplex_Er,
int *Loop_E,
int *Loop_D,
int *u,
int *pscd,
int *psct,
int *pscg,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const short **Sali1,
const short **Sali2)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, *type, *type2, *type3, type4, E, traced, i0, j0, s, n_seq, psc;
int traced_r = 0; /* flag for following backtrack in c or r */
char *st1, *st2, *struc;
char *struc_loop = NULL; /* set only when a snoRNA stem is traced */
/* encoded sequences store their length at index 0 */
n1 = (int)Sali1[0][0];
n2 = (int)Sali2[0][0];
for (s = 0; Sali1[s] != NULL; s++) ;
n_seq = s;
for (s = 0; Sali2[s] != NULL; s++) ;
if (n_seq != s)
vrna_message_error("unequal number of sequences in alibacktrack()\n");
st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
type = (int *)vrna_alloc(n_seq * sizeof(int));
type2 = (int *)vrna_alloc(n_seq * sizeof(int));
type3 = (int *)vrna_alloc(n_seq * sizeof(int));
int *indx;
int *mLoop;
int *cLoop;
folden **foldlist, **foldlist_XS;
snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
i0 = i;
j0 = j; /* MIN2(i+1,n1); j0=MAX2(j-1,1);!modified */
/* right-side dangle/terminal contribution of the closing pair */
for (s = 0; s < n_seq; s++) {
type[s] = pair[Sali1[s][i]][Sali2[s][j]];
if (type[s] == 0)
type[s] = 7;
*Duplex_Er +=
vrna_E_ext_stem(rtype[type[s]], (j > 1) ? Sali2[s][j - 1] : -1, (i < n1) ? Sali1[s][i + 1] : -1, P);
/**
*** if (i<n1) *Duplex_Er += P->dangle3[rtype[type[s]]][Sali1[s][i+1]];
*** if (j>1) *Duplex_Er += P->dangle5[rtype[type[s]]][Sali2[s][j-1]];
*** if (type[s]>2) *Duplex_Er += P->TerminalAU;
**/
}
while (i > 0 && j <= n2 - min_d2) {
if (!traced_r) {
/* phase 1: follow the r matrix until the snoRNA stem is found */
E = r[i][j];
traced = 0;
st1[i - 1] = '<';
st2[j - 1] = '>';
for (s = 0; s < n_seq; s++)
type[s] = pair[Sali1[s][i]][Sali2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
/* undo the covariance bonus applied in the fill stage */
E += psc;
*pscd += psc;
/* try to explain E by an interior-loop extension r[k][l] */
for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) {
for (l = j + 1; l <= n2; l++) {
int LE;
if (i - k + l - j > 2 * MAXLOOP_L - 2)
break;
if (abs(i - k - l + j) >= ASS)
continue;
for (s = LE = 0; s < n_seq; s++) {
type4 = pair[Sali1[s][k]][Sali2[s][l]];
if (type4 == 0)
type4 = 7;
LE +=
E_IntLoop(i - k - 1,
l - j - 1,
type4,
rtype[type[s]],
Sali1[s][k + 1],
Sali2[s][l - 1],
Sali1[s][i - 1],
Sali2[s][j + 1],
P);
}
if (E == r[k][l] + LE) {
traced = 1;
i = k;
j = l;
*Duplex_Er += LE;
break;
}
}
if (traced)
break;
}
if (!traced) {
/* no loop extension matched: look for the snoRNA stem entry */
int U = 0;
for (s = 0; s < n_seq; s++)
U += Sali1[s][i - 2];
U = (U == (n_seq) * 4 ? 1 : 0);
if (/* pair[Sali1[i+1]][Sali2[j-1]] && */ /* only U's are allowed */
U && j < max_s1 && j > min_s1 &&
j > n2 - max_s2 - max_half_stem &&
j < n2 - min_s2 - half_stem) {
int min_k, max_k;
max_k = MIN2(n2 - min_s2, j + max_half_stem + 1);
min_k = MAX2(j + half_stem + 1, n2 - max_s2);
folden *temp;
temp = foldlist[j + 1];
while (temp->next) {
int psc2, psc3;
int k = temp->k;
for (s = 0; s < n_seq; s++) {
type2[s] = pair[Sali1[s][i - 3]][Sali2[s][k + 1]];
type3[s] = pair[Sali1[s][i - 4]][Sali2[s][k + 1]];
}
psc2 = covscore(type2, n_seq);
psc3 = covscore(type3, n_seq);
if (psc2 > MINPSCORE /*&& pair[Sali1[i-4]][Sali2[k+2]]*/) {
/* stem with a --NU-- spacer: jump to c[i-3][k+1] */
if (E == c[i - 3][k + 1] + temp->energy) {
*Loop_E = temp->energy;
st1[i - 3] = '|';
*u = i - 2;
int a, b;
/* int fix_ij=indx[k-1+1]+j+1; */
/* search cLoop near (k,j) for the cell matching the
 * stored stem energy, then fold out its structure */
for (a = 0; a < MISMATCH; a++) {
for (b = 0; b < MISMATCH; b++) {
int ij = indx[k - 1 - a + 1] + j + 1 + b;
if (cLoop[ij] == temp->energy) {
struc_loop = alisnobacktrack_fold_from_pair(snoseq,
j + 1 + b,
k - a - 1 + 1,
psct);
a = INF; /* break out of both loops */
b = INF;
}
}
}
traced = 1;
traced_r = 1;
i = i - 3;
j = k + 1;
break;
}
}
if (psc3 > MINPSCORE /*&& pair[Sali1[i-5]][Sali2[k+2]]*/) {
/* stem with a --NUN-- spacer: jump to c[i-4][k+1] */
if (E == c[i - 4][k + 1] + temp->energy) {
*Loop_E = temp->energy;
st1[i - 3] = '|';
*u = i - 2;
int a, b;
/* int fix_ij=indx[k-1+1]+j+1; */
for (a = 0; a < MISMATCH; a++) {
for (b = 0; b < MISMATCH; b++) {
int ij = indx[k - 1 - a + 1] + j + 1 + b;
if (cLoop[ij] == temp->energy) {
struc_loop = alisnobacktrack_fold_from_pair(snoseq,
j + 1 + b,
k - a - 1 + 1,
psct);
a = INF;
b = INF;
}
}
}
traced = 1;
traced_r = 1;
i = i - 4;
j = k + 1;
break;
}
} /* else if */
temp = temp->next;
} /* while temp-> next */
} /* test on j */
} /* traced? */
} /* traced_r? */
else {
/* phase 2: plain duplex backtrack through the c matrix */
E = c[i][j];
traced = 0;
st1[i - 1] = '<';
st2[j - 1] = '>';
for (s = 0; s < n_seq; s++)
type[s] = pair[Sali1[s][i]][Sali2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
E += psc;
*pscd += psc;
if (!type)
vrna_message_error("backtrack failed in fold duplex c");
for (k = i - 1; (i - k) < MAXLOOP_L; k--) {
for (l = j + 1; l <= n2; l++) {
int LE;
if (i - k + l - j > 2 * MAXLOOP_L - 2)
break;
if (abs(i - k - l + j) >= ASS)
continue;
for (s = LE = 0; s < n_seq; s++) {
type4 = pair[Sali1[s][k]][Sali2[s][l]];
if (type4 == 0)
type4 = 7;
LE +=
E_IntLoop(i - k - 1,
l - j - 1,
type4,
rtype[type[s]],
Sali1[s][k + 1],
Sali2[s][l - 1],
Sali1[s][i - 1],
Sali2[s][j + 1],
P);
}
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
*Duplex_El += LE;
break;
}
}
if (traced)
break;
}
}
if (!traced) {
/* left end of the duplex: peel off the 5' dangle/terminal
 * contributions; the remainder must equal the init penalty */
for (s = 0; s < n_seq; s++) {
int correction;
correction = vrna_E_ext_stem(type[s],
(i > 1) ? Sali1[s][i - 1] : -1,
(j < n2) ? Sali2[s][j + 1] : -1,
P);
*Duplex_El += correction;
E -= correction;
/**
*** if (i>1) {E -= P->dangle5[type[s]][Sali1[s][i-1]]; *Duplex_El +=P->dangle5[type[s]][Sali1[s][i-1]];}
*** if (j<n2) {E -= P->dangle3[type[s]][Sali2[s][j+1]]; *Duplex_El +=P->dangle3[type[s]][Sali2[s][j+1]];}
*** if (type[s]>2) {E -= P->TerminalAU; *Duplex_El +=P->TerminalAU;}
**/
}
if (E != n_seq * P->DuplexInit)
vrna_message_error("backtrack failed in fold duplex end");
else
break;
}
}
/* if (i>1) i--; */
/* if (j<n2) j++; */
/* struc = (char *) vrna_alloc(i0-i+1+j-j0+1+2); */ /* declare final duplex structure */
struc = (char *)vrna_alloc(i0 - i + 1 + n2 - 1 + 1 + 2); /* declare final duplex structure */
char *struc2;
struc2 = (char *)vrna_alloc(n2 + 1);
/* char * struct_const; */
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
/* for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = struc_loop[k-1];*/ /* '.'; normal */
/* char * struct_const; */
/* struct_const = (char *) vrna_alloc(sizeof(char)*(n2+1)); */
for (k = 1; k <= n2; k++) {
if (!st2[k - 1])
/* fill unmarked s2 positions from the snoRNA-loop structure when
 * one was traced, otherwise with unpaired dots */
st2[k - 1] = struc_loop ? struc_loop[k - 1] : '.';
struc2[k - 1] = st2[k - 1]; /* '.'; */
/* if (k>=j0 && k<=j){ */
/* struct_const[k-1]='x'; */
/* } */
/* else{ */
/* if(k<j0) {struct_const[k-1]='<';} */
/* if(k>j) {struct_const[k-1]='>';} */
/* } */
}
/* char duplexseq_1[j0+1]; */
/* char duplexseq_2[n2-j+3]; */
if (j < n2) {
/* fold the two snoRNA flanks around the duplex against each other
 * to estimate the external stem (Loop_D) */
char **duplexseq_1, **duplexseq_2;
duplexseq_1 = (char **)vrna_alloc((n_seq + 1) * sizeof(char *));
duplexseq_2 = (char **)vrna_alloc((n_seq + 1) * sizeof(char *));
for (s = 0; s < n_seq; s++) {
duplexseq_1[s] = (char *)vrna_alloc((j0) * sizeof(char)); /* modfied j0+1 */
duplexseq_2[s] = (char *)vrna_alloc((n2 - j + 2) * sizeof(char)); /* modified j+3 */
strncpy(duplexseq_1[s], snoseq[s], j0 - 1); /* modified j0 */
strcpy(duplexseq_2[s], snoseq[s] + j); /* modified j-1 */
duplexseq_1[s][j0 - 1] = '\0'; /* modified j0 */
duplexseq_2[s][n2 - j + 1] = '\0'; /* modified j+2 */
}
duplexseq_1[n_seq] = NULL;
duplexseq_2[n_seq] = NULL;
duplexT temp;
temp = aliduplexfold((const char **)duplexseq_1, (const char **)duplexseq_2);
*Loop_D = MIN2(0, -410 + (int)100 * temp.energy * n_seq);
if (*Loop_D) {
/* splice the external-stem brackets into struc2 */
int l1, ibegin, iend, jbegin, jend;
l1 = strchr(temp.structure, '&') - temp.structure;
ibegin = temp.i - l1;
iend = temp.i - 1;
jbegin = temp.j;
jend = temp.j + (int)strlen(temp.structure) - l1 - 2 - 1;
for (k = ibegin + 1; k <= iend + 1; k++)
struc2[k - 1] = temp.structure[k - ibegin - 1];
for (k = jbegin + j; k <= jend + j; k++)
struc2[k - 1] = temp.structure[l1 + k - j - jbegin + 1];
}
for (s = 0; s < n_seq; s++) {
free(duplexseq_1[s]);
free(duplexseq_2[s]);
}
free(duplexseq_1);
free(duplexseq_2);
free(temp.structure);
}
/* assemble "s1-part & s2-part", stripping the 5-nt N padding of s2 */
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
/* strcat(struc, st2); */
strncat(struc, struc2 + 5, (int)strlen(struc2) - 10);
free(struc2);
free(struc_loop); /* no-op when no stem was traced (NULL) */
free(st1);
free(st2);
free(type);
free(type2);
free(type3);
/* free_arrays(); */
return struc;
}
/*
 * Lsnoop_subopt() -- local (sliding window) scan of a long target s1
 * against a snoRNA s2 using the mLoop array exported by snofold().
 * Uses 5-row rolling DP matrices lc/lr (row i % 5) so memory stays
 * O(n2); records per-position column minima in position[] and, when a
 * hit below threshTE exists, delegates hit extraction and reporting to
 * find_max_snoop().  No return value: results are emitted by the
 * callee.
 */
void
Lsnoop_subopt(const char *s1,
const char *s2,
int delta,
int w,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshTE,
const int threshSE,
const int threshD,
const int distance,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const int alignment_length,
const char *name,
const int fullStemEnergy)
{
int min_colonne = INF;
/* NOTE(review): max_pos is written below but never read afterwards */
int max_pos;
int max;
max = INF;
/* int temp; */
/* int nsubopt=10; */
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
int *position;
position = (int *)vrna_alloc((n1 + 3) * sizeof(int));
/* int Eminj, Emin_l; */
int i, j; /* l1, Emin=INF, i_min=0, j_min=0; */
/* char *struc; */
/* snoopT mfe; */
int *indx;
int *mLoop;
int *cLoop;
folden **foldlist, **foldlist_XS;
int Duplex_El, Duplex_Er;
int Loop_D;
/* int u; */
int Loop_E;
vrna_md_t md;
Duplex_El = 0;
Duplex_Er = 0;
Loop_E = 0, Loop_D = 0;
snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
set_model_details(&md);
/* refresh energy parameters on first call or temperature change */
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
snoupdate_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
/* 5-row rolling matrices: row index is i % 5 */
lc = (int **)vrna_alloc(sizeof(int *) * (5));
lr = (int **)vrna_alloc(sizeof(int *) * (5));
for (i = 0; i < 5; i++) {
lc[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
lr[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
for (j = n2; j > -1; j--) {
lc[i][j] = INF;
lr[i][j] = INF;
}
}
/* fills globals S1/S2 (pair codes) and SS1/SS2 (dangle codes) */
encode_seqs(s1, s2);
for (i = 1; i <= n1; i++) {
int idx = i % 5;
int idx_1 = (i - 1) % 5;
int idx_2 = (i - 2) % 5;
int idx_3 = (i - 3) % 5;
int idx_4 = (i - 4) % 5;
for (j = n2 - min_d2; j > min_d1; j--) {
int type, type2, k;
type = pair[S1[i]][S2[j]];
lc[idx][j] = (type) ? P->DuplexInit + 2 * penalty : INF;
lr[idx][j] = INF;
if (!type)
continue;
/* snoRNA stem entry: geometric constraints plus conserved U
 * (S1[i-2] == 4) */
if ( /*pair[S1[i+1]][S2[j-1]] && check that we have a solid base stack after the mLoop */
j < max_s1 && j > min_s1 &&
j > n2 - max_s2 - max_half_stem &&
j < n2 - min_s2 - half_stem && S1[i - 2] == 4) {
/*constraint on s2 and i*/
int min_k, max_k;
max_k = MIN2(n2 - min_s2, j + max_half_stem + 1);
min_k = MAX2(j + half_stem + 1, n2 - max_s2);
for (k = min_k; k <= max_k; k++) {
if (mLoop[indx[k - 1] + j + 1] < 0) {
}
if (pair[S1[i - 3]][S2[k]] /*genau zwei ungepaarte nucleotiden --NU--*/
&& mLoop[indx[k - 1] + j + 1] < threshloop)
lr[idx][j] = MIN2(lr[idx][j], lc[idx_3][k] + mLoop[indx[k - 1] + j + 1]);
else if (pair[S1[i - 4]][S2[k]] && mLoop[indx[k - 1] + j + 1] < threshloop) /*--NUN--*/
lr[idx][j] = MIN2(lr[idx][j], lc[idx_4][k] + mLoop[indx[k - 1] + j + 1]);
}
}
/* dangle 5'SIDE relative to the mRNA */
lc[idx][j] += vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P);
/**
*** if (i>1) lc[idx][j] += P->dangle5[type][SS1[i-1]];
*** if (j<n2) lc[idx][j] += P->dangle3[type][SS2[j+1]];
*** if (type>2) lc[idx][j] += P->TerminalAU;
**/
/* stack extension (0x0 interior loop) */
if (j < n2 && i > 1) {
type2 = pair[S1[i - 1]][S2[j + 1]];
if (type2 > 0) {
lc[idx][j] =
MIN2(lc[idx_1][j + 1] +
E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1],
P) + 2 * penalty,
lc[idx][j]);
lr[idx][j] =
MIN2(lr[idx_1][j + 1] +
E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1],
P) + 2 * penalty,
lr[idx][j]);
}
}
/* 1x1 interior loop */
if (j < n2 - 1 && i > 2) {
type2 = pair[S1[i - 2]][S2[j + 2]];
if (type2 > 0) {
lc[idx][j] =
MIN2(lc[idx_2][j + 2] +
E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1],
P) + 4 * penalty,
lc[idx][j]);
lr[idx][j] =
MIN2(lr[idx_2][j + 2] +
E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1],
P) + 4 * penalty,
lr[idx][j]);
}
}
/* 2x2 interior loop */
if (j < n2 - 2 && i > 3) {
type2 = pair[S1[i - 3]][S2[j + 3]];
if (type2 > 0) {
lc[idx][j] =
MIN2(lc[idx_3][j + 3] +
E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1],
P) + 6 * penalty,
lc[idx][j]);
lr[idx][j] =
MIN2(lr[idx_3][j + 3] +
E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1],
P) + 6 * penalty,
lr[idx][j]);
}
}
/**
*** (type>2?P->TerminalAU:0)+(i<(n1)?P->dangle3[rtype[type]][SS1[i+1]]+penalty:0)+(j>1?P->dangle5[rtype[type]][SS2[j-1]]+penalty:0)
**/
min_colonne =
MIN2(lr[idx][j] +
vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P),
min_colonne);
}
position[i] = min_colonne;
if (max >= min_colonne) {
max = min_colonne;
max_pos = i;
}
min_colonne = INF;
}
/* release the sequence encodings created by encode_seqs() */
free(S1);
free(S2);
free(SS1);
free(SS2);
if (max < threshTE) {
find_max_snoop(s1,
s2,
max,
alignment_length,
position,
delta,
distance,
penalty,
threshloop,
threshLE,
threshRE,
threshDE,
threshTE,
threshSE,
threshD,
half_stem,
max_half_stem,
min_s2,
max_s2,
min_s1,
max_s1,
min_d1,
min_d2,
name,
fullStemEnergy);
}
for (i = 1; i < 5; i++) {
free(lc[i]);
free(lr[i]);
}
free(lc[0]);
free(lr[0]);
free(lc);
free(lr);
free(position);
}
/*
 * Lsnoop_subopt_list() -- variant of Lsnoop_subopt() that takes the
 * candidate stem positions from the folden lists (foldlist) instead of
 * scanning the mLoop array directly, and caches pair types in an extra
 * rolling matrix lpair.  Otherwise identical in spirit: rolling-window
 * DP over lc/lr, column minima in position[], and hit reporting
 * delegated to find_max_snoop() when the best score beats threshTE.
 */
void
Lsnoop_subopt_list(const char *s1,
const char *s2,
int delta,
int w,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshTE,
const int threshSE,
const int threshD,
const int distance,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const int alignment_length,
const char *name,
const int fullStemEnergy)
{
int min_colonne = INF;
/* NOTE(review): max_pos is written below but never read afterwards */
int max_pos;
int max;
max = INF;
/* int temp; */
/* int nsubopt=10; */
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
int *position;
position = (int *)vrna_alloc((n1 + 3) * sizeof(int));
/* int Eminj, Emin_l; */
int i, j;/* l1, Emin=INF, i_min=0, j_min=0; */
/* char *struc; */
/* snoopT mfe; */
int *indx;
int *mLoop;
int *cLoop;
folden **foldlist, **foldlist_XS;
int Duplex_El, Duplex_Er;
int Loop_D;
/* int u; */
int Loop_E;
vrna_md_t md;
Duplex_El = 0;
Duplex_Er = 0;
Loop_E = 0, Loop_D = 0;
snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
set_model_details(&md);
/* refresh energy parameters on first call or temperature change */
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
snoupdate_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
/* NOTE(review): 6 rows are allocated but row indices use i % 5, so
 * row 5 only ever holds the INF/0 initialization */
lpair = (int **)vrna_alloc(sizeof(int *) * (6));
lc = (int **)vrna_alloc(sizeof(int *) * (6));
lr = (int **)vrna_alloc(sizeof(int *) * (6));
for (i = 0; i < 6; i++) {
lc[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
lr[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
lpair[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
for (j = n2; j > -1; j--) {
lc[i][j] = INF;
lr[i][j] = INF;
lpair[i][j] = 0;
}
}
/* fills globals S1/S2 (pair codes) and SS1/SS2 (dangle codes) */
encode_seqs(s1, s2);
int lim_maxj = n2 - min_d2;
int lim_minj = min_d1;
int lim_maxi = n1;
for (i = 5; i <= lim_maxi; i++) {
int idx = i % 5;
int idx_1 = (i - 1) % 5;
int idx_2 = (i - 2) % 5;
int idx_3 = (i - 3) % 5;
int idx_4 = (i - 4) % 5;
for (j = lim_maxj; j > lim_minj; j--) {
int type, type2;/* E, k,l; */
type = pair[S1[i]][S2[j]];
/* cache the pair type so older rows can be consulted cheaply */
lpair[idx][j] = type;
lc[idx][j] = (type) ? P->DuplexInit + 2 * penalty : INF;
lr[idx][j] = INF;
if (!type)
continue;
if ( /*pair[S1[i+1]][S2[j-1]] && Be sure it binds*/
j < max_s1 && j > min_s1 &&
j > n2 - max_s2 - max_half_stem &&
j < n2 - min_s2 - half_stem && S1[i - 2] == 4) {
/*constraint on s2 and i*/
int min_k, max_k;
max_k = MIN2(n2 - min_s2, j + max_half_stem + 1);
min_k = MAX2(j + half_stem + 1, n2 - max_s2);
/* walk the precomputed stem-candidate list for column j+1 */
folden *temp;
temp = foldlist[j + 1];
while (temp->next) {
int k = temp->k;
/* if(k >= min_k-1 && k < max_k){ comment to recover normal behaviour */
if (lpair[idx_3][k + 1] /*&& lpair[idx_4][k+2]*/)
lr[idx][j] = MIN2(lr[idx][j], lc[idx_3][k + 1] + temp->energy); /*--NU--*/
/*else*/ if (lpair[idx_4][k + 1]) /*--NUN--*/
lr[idx][j] = MIN2(lr[idx][j], lc[idx_4][k + 1] + temp->energy);
/* } */
temp = temp->next;
}
}
/* dangle 5'SIDE relative to the mRNA */
lc[idx][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P);
/**
*** lc[idx][j] += P->dangle5[type][SS1[i-1]];
*** lc[idx][j] += P->dangle3[type][SS2[j+1]];
*** if (type>2) lc[idx][j] += P->TerminalAU;
**/
/* if(j<n2 && i>1){ */
/* type2=pair[S1[i-1]][S2[j+1]]; */
/* stack extension (0x0 interior loop) */
type2 = lpair[idx_1][j + 1];
if (type2 > 0) {
lc[idx][j] =
MIN2(lc[idx_1][j + 1] +
E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1],
P) + 2 * penalty,
lc[idx][j]);
lr[idx][j] =
MIN2(lr[idx_1][j + 1] +
E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1],
P) + 2 * penalty,
lr[idx][j]);
}
/* } */
/* if(j<n2-1 && i>2){ */
/* type2=pair[S1[i-2]][S2[j+2]]; */
/* 1x1 interior loop (no penalty term here, unlike Lsnoop_subopt) */
type2 = lpair[idx_2][j + 2];
if (type2 > 0) {
lc[idx][j] =
MIN2(lc[idx_2][j + 2] +
E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1],
P),
lc[idx][j]);
lr[idx][j] =
MIN2(lr[idx_2][j + 2] +
E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1],
P),
lr[idx][j]);
/* } */
}
/* if(j<n2-2 && i>3){ */
/* type2 = pair[S1[i-3]][S2[j+3]]; */
/* 2x2 interior loop */
type2 = lpair[idx_3][j + 3];
if (type2 > 0) {
lc[idx][j] =
MIN2(lc[idx_3][j + 3] +
E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1],
P) + 6 * penalty,
lc[idx][j]);
lr[idx][j] =
MIN2(lr[idx_3][j + 3] +
E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1],
P) + 6 * penalty,
lr[idx][j]);
/* } */
}
/* min_colonne=MIN2(lr[idx][j]+(type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]], min_colonne); */
int bla;
bla = lr[idx][j] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P) + 2 * penalty;
min_colonne = MIN2(bla, min_colonne);
}
position[i] = min_colonne;
if (max >= min_colonne) {
max = min_colonne;
max_pos = i;
}
min_colonne = INF;
}
/* release the sequence encodings created by encode_seqs() */
free(S1);
free(S2);
free(SS1);
free(SS2);
if (max < threshTE) {
find_max_snoop(s1,
s2,
max,
alignment_length,
position,
delta,
distance,
penalty,
threshloop,
threshLE,
threshRE,
threshDE,
threshTE,
threshSE,
threshD,
half_stem,
max_half_stem,
min_s2,
max_s2,
min_s1,
max_s1,
min_d1,
min_d2,
name,
fullStemEnergy);
}
for (i = 1; i < 6; i++) {
free(lc[i]);
free(lr[i]);
free(lpair[i]);
}
free(lc[0]);
free(lr[0]);
free(lpair[0]);
free(lc);
free(lr);
free(lpair);
free(position);
}
/*
 * find_max_snoop() - report all candidate snoRNA target sites found by the
 * caller's linear pre-scan.
 *
 * position[] holds, for each 3' end position in s1, the best (lowest) duplex
 * score found by the pre-scan.  Every position below MIN2(threshTE, max+delta)
 * is re-folded exactly with snoopfold() on a window of at most
 * alignment_length nucleotides, filtered against the per-component energy
 * thresholds, printed to stdout, and (when `name` is non-NULL) plotted to a
 * PostScript file.
 *
 * Relies on the file-scope globals n1, n2 (sequence lengths set by the
 * caller) and cut_point (consumed by the plotting routine).  No return value.
 */
PRIVATE void
find_max_snoop(const char *s1,
               const char *s2,
               const int max,              /* best score seen in the pre-scan */
               const int alignment_length, /* max width of the refolded window */
               const int *position,        /* per-position best duplex score */
               const int delta,            /* report within delta of `max` */
               const int distance,         /* suppression radius for near-duplicate hits */
               const int penalty,
               const int threshloop,
               const int threshLE,
               const int threshRE,
               const int threshDE,
               const int threshTE,
               const int threshSE,
               const int threshD,
               const int half_stem,
               const int max_half_stem,
               const int min_s2,
               const int max_s2,
               const int min_s1,
               const int max_s1,
               const int min_d1,
               const int min_d2,
               const char *name,           /* basename for PS output, or NULL */
               const int fullStemEnergy)
{
  int count = 0;      /* numbers the PS output files */
  int pos = n1 + 1;
  /* anything within `delta` of the best hit is reported, capped by threshTE */
  int threshold = MIN2(threshTE, max + delta);

  /* printf("threshTE %d max %d\n", threshTE, max); */
  /* #pragma omp parallel for */
  /* for(pos=n1+1;pos>distance;pos--){ */
  while (pos-- > 5) {
    int temp_min = 0;
    if (position[pos] < (threshold)) {
      /* within `distance` columns left of pos, jump to the best-scoring one
       * so that overlapping hits collapse onto a single reported position */
      int search_range;
      search_range = distance + 1;
      while (--search_range)
        if (position[pos - search_range] <= position[pos - temp_min])
          temp_min = search_range;
      pos -= temp_min;
      int begin = MAX2(6, pos - alignment_length + 1);
      /* excise the candidate window and pad both ends with N's, as
       * snoopfold() expects 5-nt dummy flanks */
      char *s3 = (char *)vrna_alloc(sizeof(char) * (pos - begin + 3 + 12));
      strcpy(s3, "NNNNN");
      strncat(s3, (s1 + begin - 1), pos - begin + 2);
      strcat(s3, "NNNNN\0");
      /* printf("%s s3\n", s3); */
      snoopT test;
      test = snoopfold(s3,
                       s2,
                       penalty,
                       threshloop,
                       threshLE,
                       threshRE,
                       threshDE,
                       threshD,
                       half_stem,
                       max_half_stem,
                       min_s2,
                       max_s2,
                       min_s1,
                       max_s1,
                       min_d1,
                       min_d2,
                       fullStemEnergy);
      if (test.energy == INF) {
        /* no duplex found under the constraints for this window */
        free(s3);
        continue;
      }
      /* drop hits whose components miss the user thresholds
       * (thresh* are in dcal/mol, test.* fields in kcal/mol) */
      if (test.Duplex_El > threshLE * 0.01 || test.Duplex_Er > threshRE * 0.01 ||
          test.Loop_D > threshD * 0.01 || (test.Duplex_Er + test.Duplex_El) > threshDE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E + test.Loop_D + 410) > threshSE * 0.01) {
        free(test.structure);
        free(s3);
        continue;
      }
      int l1;
      /* l1 = length of the target part of the bracket structure */
      l1 = strchr(test.structure, '&') - test.structure;
      int shift = 0;
      /* NOTE(review): the two corrections below trim the reported interval
       * when the hit touches the padded window ends — presumably to undo
       * the N padding; verify against snoopfold()'s index conventions */
      if (test.i > (int)strlen(s3) - 10) {
        test.i--;
        l1--;
      }
      if (test.i - l1 < 0) {
        l1--;
        shift++;
      }
      /* rebuild the structure string with the (possibly trimmed) target part */
      char *target_struct = (char *)vrna_alloc(sizeof(char) * (strlen(test.structure) + 1));
      strncpy(target_struct, test.structure + shift, l1);
      strncat(target_struct, test.structure + (strchr(test.structure, '&') -
                                               test.structure),
              (int)strlen(test.structure) - (strchr(test.structure, '&') -
                                             test.structure));
      strcat(target_struct, "\0");
      /* target subsequence covered by the duplex */
      char *target;
      target = (char *)vrna_alloc(l1 + 1);
      strncpy(target, (s3 + test.i + 5 - l1), l1);
      target[l1] = '\0';
      /* snoRNA sequence without its 5-nt N flanks */
      char *s4;
      s4 = (char *)vrna_alloc(sizeof(char) * (strlen(s2) - 9));
      strncpy(s4, s2 + 5, (int)strlen(s2) - 10);
      s4[(int)strlen(s2) - 10] = '\0';
      printf(
        "%s %3d,%-3d;%3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f + %5.2f + 4.1 ) (%5.2f) \n%s&%s\n",
        target_struct,
        begin + test.i - 5 - l1,
        begin + test.i - 6,
        begin + test.u - 6,
        test.j + 1,
        /* NOTE(review): strrchr-strchr of the same '>' spans the snoRNA-side
         * pairing region; it is 0 when only one '>' exists — confirm intent */
        test.j + (int)(strrchr(test.structure, '>') - strchr(test.structure, '>')) + 1,
        test.Loop_D + test.Duplex_El + test.Duplex_Er + test.Loop_E + 4.10,
        test.Duplex_El,
        test.Duplex_Er,
        test.Loop_E,
        test.Loop_D,
        test.fullStemEnergy,
        target,
        s4);
      if (name) {
        /* produce a secondary-structure plot of target&snoRNA */
        char *temp_seq;
        char *temp_struc;
        char *psoutput;
        temp_seq = (char *)vrna_alloc(sizeof(char) * (l1 + n2 - 9));
        temp_struc = (char *)vrna_alloc(sizeof(char) * (l1 + n2 - 9));
        strcpy(temp_seq, target);
        strcat(temp_seq, s4);
        strncpy(temp_struc, target_struct, l1);
        strcat(temp_struc, target_struct + l1 + 1);
        temp_seq[n2 + l1 - 10] = '\0';
        temp_struc[n2 + l1 - 10] = '\0';
        cut_point = l1 + 1; /* tell the plotter where the two strands join */
        psoutput = vrna_strdup_printf("sno_%d_u_%d_%s.ps",
                                      count,
                                      begin + test.u - 6,
                                      name);
        PS_rna_plot_snoop_a(temp_seq, temp_struc, psoutput, NULL, NULL);
        cut_point = -1;
        free(temp_seq);
        free(temp_struc);
        free(psoutput);
        count++;
        /* free(psoutput); */
      }
      free(s4);
      free(test.structure);
      free(target_struct);
      free(target);
      free(s3);
    }
  }
}
snoopT
snoopfold(const char *s1,
const char *s2,
const int penalty,
const int threshloop,
const int threshLE,
const int threshRE,
const int threshDE,
const int threshD,
const int half_stem,
const int max_half_stem,
const int min_s2,
const int max_s2,
const int min_s1,
const int max_s1,
const int min_d1,
const int min_d2,
const int fullStemEnergy)
{
/* int Eminj, Emin_l; */
int i, j, l1, Emin = INF, i_min = 0, j_min = 0;
char *struc;
snoopT mfe;
int *indx;
int *mLoop;
int *cLoop;
folden **foldlist, **foldlist_XS;
int Duplex_El, Duplex_Er;
int Loop_D;
int u;
int Loop_E;
vrna_md_t md;
Duplex_El = 0;
Duplex_Er = 0;
Loop_E = 0, Loop_D = 0;
snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
snoupdate_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
r = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
for (i = 0; i <= n1; i++) {
c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
r[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
for (j = n2; j > -1; j--) {
c[i][j] = INF;
r[i][j] = INF;
}
}
encode_seqs(s1, s2);
for (i = 6; i <= n1 - 5; i++) {
for (j = n2 - min_d2; j > min_d1; j--) {
int type, type2, E, k, l;
type = pair[S1[i]][S2[j]];
c[i][j] = (type) ? P->DuplexInit : INF;
if (!type)
continue;
if (/* pair[S1[i+1]][S2[j-1]] && */
j < max_s1 && j > min_s1 &&
j > n2 - max_s2 - max_half_stem &&
j < n2 - min_s2 - half_stem && S1[i - 2] == 4) {
/*constraint on s2 and i*/
int min_k, max_k;
max_k = MIN2(n2 - min_s2, j + max_half_stem);
min_k = MAX2(j + half_stem, n2 - max_s2);
folden *temp;
temp = foldlist[j + 1];
while (temp->next) {
int k = temp->k;
/* if(k >= min_k-1 && k < max_k){ uncomment to recovernormal behaviour */
if (pair[S1[i - 3]][S2[k + 1]] /*&& pair[S1[i-4]][S2[k+2]]*/)
r[i][j] = MIN2(r[i][j], c[i - 3][k + 1] + temp->energy);
/*else*/ if (pair[S1[i - 4]][S2[k + 1]] /*&& pair[S1[i-5]][S2[k+2]]*/)
r[i][j] = MIN2(r[i][j], c[i - 4][k + 1] + temp->energy);
/* } */
temp = temp->next;
}
}
/* dangle 5'SIDE relative to the mRNA */
/**
*** c[i][j] += P->dangle5[type][SS1[i-1]];
*** c[i][j] += P->dangle3[type][SS2[j+1]];
*** if (type>2) c[i][j] += P->TerminalAU;
**/
c[i][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P);
for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) {
for (l = j + 1; l <= n2; l++) {
if (i - k + l - j > 2 * MAXLOOP_L - 2)
break;
if (abs(i - k - l + j) >= ASS)
continue;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
c[i][j] = MIN2(c[i][j], c[k][l] + E + (i - k + l - j) * penalty);
r[i][j] = MIN2(r[i][j], r[k][l] + E + (i - k + l - j) * penalty);
}
}
E = r[i][j];
/**
*** if (i<n1) E += P->dangle3[rtype[type]][SS1[i+1]];
*** if (j>1) E += P->dangle5[rtype[type]][SS2[j-1]];
*** f (type>2) E += P->TerminalAU;
**/
E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
if (Emin > 0) {
printf("no target found under the constraints chosen\n");
for (i = 0; i <= n1; i++) {
free(r[i]);
free(c[i]);
}
free(c);
free(r);
free(S1);
free(S2);
free(SS1);
free(SS2);
mfe.energy = INF;
return mfe;
}
struc = snoop_backtrack(i_min, j_min, s2, &Duplex_El, &Duplex_Er, &Loop_E, &Loop_D,
&u, penalty, threshloop, threshLE, threshRE, threshDE, threshD,
half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2);
/* if (i_min<n1-5) i_min++; */
/* if (j_min>1 ) j_min--; */
l1 = strchr(struc, '&') - struc;
mfe.i = i_min - 5;
mfe.j = j_min - 5;
mfe.u = u - 5;
mfe.Duplex_Er = (float)Duplex_Er / 100;
mfe.Duplex_El = (float)Duplex_El / 100;
mfe.Loop_D = (float)Loop_D / 100;
mfe.Loop_E = (float)Loop_E / 100;
mfe.energy = (float)Emin / 100;
mfe.fullStemEnergy = (float)fullStemEnergy / 100;
mfe.structure = struc;
if (!delay_free) {
for (i = 0; i <= n1; i++) {
free(r[i]);
free(c[i]);
}
free(c);
free(r);
free(S1);
free(S2);
free(SS1);
free(SS2);
}
return mfe;
}
/*
 * snoopfold_XS_fill() - fill the global duplex matrices c_fill/r_fill for
 * sequence s1 against snoRNA s2, adding the target's opening (accessibility)
 * costs access_s1[...] to every extension step.
 *
 * Same recursion as snoopfold(), but nothing is backtracked or freed here:
 * the matrices and the encoded sequences stay alive for the caller
 * (snoop_subopt_XS) to inspect afterwards.
 *
 * Returns the minimum exterior-closed duplex energy found (Emin, dcal/mol);
 * a value > 0 (INF when no pair at all) means no acceptable duplex.
 */
PRIVATE int
snoopfold_XS_fill(const char *s1,
                  const char *s2,
                  const int **access_s1,  /* accessibility profile of s1 */
                  const int penalty,
                  const int threshloop,
                  const int threshLE,
                  const int threshRE,
                  const int threshDE,
                  const int threshD,
                  const int half_stem,
                  const int max_half_stem,
                  const int min_s2,
                  const int max_s2,
                  const int min_s1,
                  const int max_s1,
                  const int min_d1,
                  const int min_d2)
{
  /* int Eminj, Emin_l; */
  int i, j, Emin = INF, i_min = 0, j_min = 0;
  /* char *struc; */
  /* snoopT mfe; */
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  /* kept for symmetry with snoopfold(); not used in the fill-only variant */
  int Duplex_El, Duplex_Er;
  int Loop_D;
  /* int u; */
  int Loop_E;
  vrna_md_t md;
  Duplex_El = 0;
  Duplex_Er = 0;
  Loop_E = 0, Loop_D = 0;
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  n1 = (int)strlen(s1);
  n2 = (int)strlen(s2);
  set_model_details(&md);
  /* (re)build the energy parameter set if the temperature changed */
  if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
    snoupdate_fold_params();
    if (P)
      free(P);
    P = vrna_params(&md);
    make_pair_matrix();
  }
  /* allocate and initialize the fill matrices (freed by the caller) */
  c_fill = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
  r_fill = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
  for (i = 0; i <= n1; i++) {
    c_fill[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    r_fill[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    for (j = n2; j > -1; j--) {
      c_fill[i][j] = INF;
      r_fill[i][j] = INF;
    }
  }
  encode_seqs(s1, s2);
  /* di[k]: accessibility cost of opening k extra target positions 5' of i,
   * capped per position (165 dcal/mol each) */
  int di[5];
  di[0] = 0;
  for (i = 6; i <= n1 - 5; i++) {
    di[1] = access_s1[5][i] - access_s1[4][i - 1];
    di[2] = access_s1[5][i - 1] - access_s1[4][i - 2] + di[1];
    di[3] = access_s1[5][i - 2] - access_s1[4][i - 3] + di[2];
    di[4] = access_s1[5][i - 3] - access_s1[4][i - 4] + di[3];
    di[1] = MIN2(di[1], 165);
    di[2] = MIN2(di[2], 330);
    di[3] = MIN2(di[3], 495);
    di[4] = MIN2(di[4], 660);
    for (j = n2 - min_d2; j > min_d1; j--) {
      int type, type2, E, k, l;
      type = pair[S1[i]][S2[j]];
      c_fill[i][j] = (type) ? P->DuplexInit : INF;
      if (!type)
        continue;
      /* snoRNA-internal hairpin closing: U required at S1[i-2], j inside
       * the stem window */
      if (/* pair[S1[i+1]][S2[j-1]] && */
          j < max_s1 && j > min_s1 &&
          j > n2 - max_s2 - max_half_stem &&
          j < n2 - min_s2 - half_stem && S1[i - 2] == 4) {
        /*constraint on s2 and i*/
        int min_k, max_k; /* only used by the commented-out range guard */
        max_k = MIN2(n2 - min_s2, j + max_half_stem);
        min_k = MAX2(j + half_stem, n2 - max_s2);
        folden *temp;
        temp = foldlist[j + 1];
        while (temp->next) {
          int k = temp->k;
          /* if(k >= min_k-1 && k < max_k){ uncomment to recovernormal behaviour */
          if (pair[S1[i - 3]][S2[k + 1]] /*&& pair[S1[i-4]][S2[k+2]]*/)
            r_fill[i][j] = MIN2(r_fill[i][j], c_fill[i - 3][k + 1] + temp->energy + di[3]);
          /*else*/ if (pair[S1[i - 4]][S2[k + 1]] /*&& pair[S1[i-5]][S2[k+2]]*/)
            r_fill[i][j] = MIN2(r_fill[i][j], c_fill[i - 4][k + 1] + temp->energy + di[4]);
          /* } */
          temp = temp->next;
        }
      }
      /* dangle 5'SIDE relative to the mRNA */
      c_fill[i][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P);
      /* interior-loop extension; accessibility cost di[i-k] replaces the
       * per-nucleotide penalty used in snoopfold() */
      for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) {
        for (l = j + 1; l <= n2; l++) {
          if (i - k + l - j > 2 * MAXLOOP_L - 2)
            break;
          if (abs(i - k - l + j) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
                        SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
          c_fill[i][j] = MIN2(c_fill[i][j], c_fill[k][l] + E + di[i - k]);
          r_fill[i][j] = MIN2(r_fill[i][j], r_fill[k][l] + E + di[i - k]);
        }
      }
      /* close with the exterior-stem bonus; track the global minimum */
      E = r_fill[i][j];
      E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
      if (E < Emin) {
        Emin = E;
        i_min = i;
        j_min = j;
      }
    }
  }
  return Emin;
}
/*
 * snoop_subopt() - enumerate suboptimal snoRNA/target duplexes within
 * `delta` (dcal/mol) of the mfe hit (capped by threshTE).
 *
 * Runs snoopfold() once with delay_free set so that the c/r matrices stay
 * alive, then scans every (i,j) of r[][], backtracks each candidate and
 * filters it against the per-component thresholds.
 *
 * Returns a terminator-ended array (last element has structure == NULL),
 * sorted by energy when the global snoop_subopt_sorted is set, or NULL
 * when the mfe fold finds no duplex at all.  The caller owns the array
 * and each element's structure string.
 */
PUBLIC snoopT *
snoop_subopt(const char *s1,
             const char *s2,
             int delta,
             int w,
             const int penalty,
             const int threshloop,
             const int threshLE,
             const int threshRE,
             const int threshDE,
             const int threshTE,
             const int threshSE,
             const int threshD,
             const int distance,
             const int half_stem,
             const int max_half_stem,
             const int min_s2,
             const int max_s2,
             const int min_s1,
             const int max_s1,
             const int min_d1,
             const int min_d2,
             const int fullStemEnergy)
{
  /* printf("%d %d\n", min_s2, max_s2); */
  /* n1/n2 deliberately shadow the file globals (same values here) */
  int i, j, n1, n2, E, n_subopt = 0, n_max;
  char *struc;
  snoopT mfe;
  snoopT *subopt;
  int thresh;
  int Duplex_El, Duplex_Er, Loop_E;
  int Loop_D;
  Duplex_El = 0;
  Duplex_Er = 0;
  Loop_E = 0;
  Loop_D = 0;
  int u;
  u = 0;
  n_max = 16;
  subopt = (snoopT *)vrna_alloc(n_max * sizeof(snoopT));
  /* keep the mfe fold's c/r matrices alive: we backtrack in them below */
  delay_free = 1;
  mfe = snoopfold(s1, s2, penalty, threshloop, threshLE, threshRE, threshDE, threshD,
                  half_stem, max_half_stem,
                  min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, fullStemEnergy);
  if (mfe.energy > 0) {
    /* no duplex at all; snoopfold() freed its arrays on this path */
    free(subopt);
    delay_free = 0;
    return NULL;
  }
  thresh = MIN2((int)((mfe.Duplex_Er + mfe.Duplex_El + mfe.Loop_E) * 100 + 0.1 + 410) + delta,
                threshTE);
  /* subopt[n_subopt++]=mfe; */
  free(mfe.structure);
  n1 = (int)strlen(s1);
  n2 = (int)strlen(s2);
  for (i = n1; i > 0; i--) {
    for (j = 1; j <= n2; j++) {
      int type, Ed;
      type = pair[S2[j]][S1[i]];
      if (!type)
        continue;
      E = Ed = r[i][j];
      /**
       *** if (i<n1) Ed += P->dangle3[type][SS1[i+1]];
       *** if (j>1) Ed += P->dangle5[type][SS2[j-1]];
       *** if (type>2) Ed += P->TerminalAU;
       **/
      Ed += vrna_E_ext_stem(type, (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
      if (Ed > thresh)
        continue;
      /* to keep output small, remove hits that are dominated by a
       * better one close (w) by. For simplicity we do test without
       * adding dangles, which is slightly inaccurate.
       */
      /* w=1; */
      /* for (ii=MAX2(i-w,1); (ii<=MIN2(i+w,n1)) && type; ii++) { */
      /* for (jj=MAX2(j-w,1); jj<=MIN2(j+w,n2); jj++) */
      /* if (r[ii][jj]<E) {type=0; break;} */
      /* } */
      if (!type)
        continue;
      struc = snoop_backtrack(i,
                              j,
                              s2,
                              &Duplex_El,
                              &Duplex_Er,
                              &Loop_E,
                              &Loop_D,
                              &u,
                              penalty,
                              threshloop,
                              threshLE,
                              threshRE,
                              threshDE,
                              threshD,
                              half_stem,
                              max_half_stem,
                              min_s2,
                              max_s2,
                              min_s1,
                              max_s1,
                              min_d1,
                              min_d2);
      /* reject hits whose energy components miss the thresholds */
      if (Duplex_Er > threshRE || Duplex_El > threshLE || Loop_D > threshD ||
          (Duplex_Er + Duplex_El) > threshDE ||
          (Duplex_Er + Duplex_El + Loop_E) > threshTE ||
          (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) > threshSE) {
        Duplex_Er = 0;
        Duplex_El = 0;
        Loop_E = 0;
        Loop_D = 0;
        u = 0; /* FIX: was `u = 0, free(struc);` — comma-operator typo */
        free(struc);
        continue;
      }
      /* grow the result array; +1 keeps room for the terminator entry */
      if (n_subopt + 1 >= n_max) {
        n_max *= 2;
        subopt = (snoopT *)vrna_realloc(subopt, n_max * sizeof(snoopT));
      }
      /* store, shifting indices by the 5-nt N padding and converting
       * energies from dcal/mol to kcal/mol */
      subopt[n_subopt].i = i - 5;
      subopt[n_subopt].j = j - 5;
      subopt[n_subopt].u = u - 5;
      subopt[n_subopt].Duplex_Er = Duplex_Er * 0.01;
      subopt[n_subopt].Duplex_El = Duplex_El * 0.01;
      subopt[n_subopt].Loop_E = Loop_E * 0.01;
      subopt[n_subopt].Loop_D = Loop_D * 0.01;
      subopt[n_subopt].energy = (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) * 0.01;
      subopt[n_subopt].fullStemEnergy = (float)fullStemEnergy * 0.01;
      subopt[n_subopt++].structure = struc;
      Duplex_Er = 0;
      Duplex_El = 0;
      Loop_E = 0;
      Loop_D = 0;
      u = 0;
    }
  }
  /* release the matrices kept alive for the scan */
  for (i = 0; i <= n1; i++) {
    free(c[i]);
    free(r[i]);
  }
  free(c);
  free(r);
  free(S1);
  free(S2);
  free(SS1);
  free(SS2);
  delay_free = 0;
  if (snoop_subopt_sorted)
    qsort(subopt, n_subopt, sizeof(snoopT), compare);
  /* terminator entry */
  subopt[n_subopt].i = 0;
  subopt[n_subopt].j = 0;
  subopt[n_subopt].structure = NULL;
  return subopt;
}
/*
 * snoop_subopt_XS() - accessibility-aware suboptimal scan.
 *
 * Fills r_fill/c_fill via snoopfold_XS_fill(), keeps private copies of the
 * encoded sequences (S*_fill), then rescans every (i,j), refolds each
 * candidate window with snoopfold_XS(), filters it against the thresholds,
 * prints it and optionally plots it (when `name` is non-NULL).
 *
 * Fixes vs. the previous revision:
 *  - the four memcpy() sizes were missing parentheses
 *    (sizeof(short) * n3 + 2 instead of sizeof(short) * (n3 + 2)),
 *    truncating the copies of the encoded sequences;
 *  - the dangle computation read SS1/SS2 after they were freed
 *    (use-after-free); it now reads the SS1_fill/SS2_fill copies.
 */
PUBLIC void
snoop_subopt_XS(const char *s1,
                const char *s2,
                const int **access_s1,
                int delta,
                int w,
                const int penalty,
                const int threshloop,
                const int threshLE,
                const int threshRE,
                const int threshDE,
                const int threshTE,
                const int threshSE,
                const int threshD,
                const int distance,
                const int half_stem,
                const int max_half_stem,
                const int min_s2,
                const int max_s2,
                const int min_s1,
                const int max_s1,
                const int min_d1,
                const int min_d2,
                const int alignment_length,
                const char *name,
                const int fullStemEnergy)
{
  /* printf("%d %d\n", min_s2, max_s2); */
  int i, j, E;
  int thresh;
  delay_free = 1;
  int Emin = snoopfold_XS_fill(s1,
                               s2,
                               access_s1,
                               penalty,
                               threshloop,
                               threshLE,
                               threshRE,
                               threshDE,
                               threshD,
                               half_stem,
                               max_half_stem,
                               min_s2,
                               max_s2,
                               min_s1,
                               max_s1,
                               min_d1,
                               min_d2);
  if (Emin > 0)
    delay_free = 0;
  thresh = MIN2(-100, threshTE + alignment_length * 30);
  /* n1=(int)strlen(s1); */
  /* n2=(int)strlen(s2); */
  int n3 = (int)strlen(s1);
  int n4 = (int)strlen(s2);
  /* keep private copies of the encoded sequences: snoopfold_XS() below
   * re-runs encode_seqs() and clobbers the globals */
  S1_fill = (short *)vrna_alloc(sizeof(short) * (n3 + 2));
  S2_fill = (short *)vrna_alloc(sizeof(short) * (n4 + 2));
  SS1_fill = (short *)vrna_alloc(sizeof(short) * (n3 + 1));
  SS2_fill = (short *)vrna_alloc(sizeof(short) * (n4 + 1));
  /* FIX: sizes were `sizeof(short) * n3 + 2` etc. (precedence bug) */
  memcpy(S1_fill, S1, sizeof(short) * (n3 + 2));
  memcpy(S2_fill, S2, sizeof(short) * (n4 + 2));
  memcpy(SS1_fill, SS1, sizeof(short) * (n3 + 1));
  memcpy(SS2_fill, SS2, sizeof(short) * (n4 + 1));
  free(S1);
  free(S2);
  free(SS1);
  free(SS2);
  int count = 0;
  for (i = n3 - 5; i > 0; i--) {
    for (j = 1; j <= n4; j++) {
      int type, Ed;
      type = pair[S2_fill[j]][S1_fill[i]];
      if (!type)
        continue;
      E = Ed = r_fill[i][j];
      /**
       ***if (i<n3) Ed += P->dangle3[type][SS1_fill[i+1]];
       ***if (j>1) Ed += P->dangle5[type][SS2_fill[j-1]];
       ***if (type>2) Ed += P->TerminalAU;
       **/
      /* FIX: read the _fill copies; SS1/SS2 were freed above */
      Ed += vrna_E_ext_stem(type, (j > 1) ? SS2_fill[j - 1] : -1, (i < n3) ? SS1_fill[i + 1] : -1, P);
      if (Ed > thresh)
        continue;
      /* to keep output small, remove hits that are dominated by a
       * better one close (w) by. For simplicity we do test without
       * adding dangles, which is slightly inaccurate.
       */
      /* w=10; */
      /* for (ii=MAX2(i-w,1); (ii<=MIN2(i+w,n3-5)) && type; ii++) { */
      /* for (jj=MAX2(j-w,1); jj<=MIN2(j+w,n4-5); jj++) */
      /* if (r_fill[ii][jj]<E) {type=0; break;} */
      /* } */
      /* i=ii;j=jj; */
      if (!type)
        continue;
      /* excise the candidate window, pad its 3' end with N's */
      int begin = MAX2(5, i - alignment_length);
      int end = MIN2(n3 - 5, i - 1);
      char *s3 = (char *)vrna_alloc(sizeof(char) * (end - begin + 2) + 5);
      strncpy(s3, (s1 + begin), end - begin + 1);
      strcat(s3, "NNNNN\0");
      int n5 = (int)strlen(s3);
      snoopT test = snoopfold_XS(s3, s2, access_s1, i, j, penalty,
                                 threshloop, threshLE, threshRE,
                                 threshDE, threshD, half_stem,
                                 max_half_stem, min_s2, max_s2, min_s1,
                                 max_s1, min_d1, min_d2, fullStemEnergy);
      if (test.energy == INF) {
        free(s3);
        continue;
      }
      /* drop hits whose components miss the thresholds (kcal vs dcal/mol) */
      if (test.Duplex_El > threshLE * 0.01 || test.Duplex_Er > threshRE * 0.01 ||
          test.Loop_D > threshD * 0.01 || (test.Duplex_Er + test.Duplex_El) > threshDE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E) > threshTE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E + test.Loop_D + 410) > threshSE * 0.01) {
        free(test.structure);
        free(s3);
        continue;
      }
      /* snoRNA without its 5-nt N flanks */
      char *s4;
      s4 = (char *)vrna_alloc(sizeof(char) * (n4 - 9));
      strncpy(s4, s2 + 5, n4 - 10);
      s4[n4 - 10] = '\0';
      /* bound target subsequence */
      char *s5 = vrna_alloc(sizeof(char) * n5 - test.i + 2 - 5);
      strncpy(s5, s3 + test.i - 1, n5 - test.i + 1 - 5);
      s5[n5 - test.i + 1 - 5] = '\0';
      /* opening energy of the bound stretch, kcal/mol */
      float dE = ((float)(access_s1[n5 - test.i + 1 - 5][i])) * 0.01;
      printf(
        "%s %3d,%-3d;%3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f + %5.2f + %5.2f + 4.10) (%5.2f)\n%s&%s\n",
        test.structure,
        i - (n5 - test.i),
        i - 5,
        i - (n5 - test.u),
        j - 5,
        j - 5 + (int)(strrchr(test.structure, '>') - strchr(test.structure, '>')),
        test.Loop_D + test.Duplex_El + test.Duplex_Er + test.Loop_E + 4.10 + dE,
        test.Duplex_El,
        test.Duplex_Er,
        test.Loop_E,
        test.Loop_D,
        dE,
        test.fullStemEnergy,
        s5,
        s4);
      if (name) {
        /* produce a structure plot annotated with relative accessibility */
        int begin_t, end_t, begin_q, end_q, and, pipe, k;
        char *psoutput;
        begin_q = 0;
        end_q = n4 - 10;
        begin_t = 0;
        end_t = n5 - test.i + 1 - 5;
        and = end_t + 1;
        pipe = test.u - test.i + 1;
        cut_point = end_t + 1;
        char *catseq, *catstruct;/* *fname; */
        catseq = (char *)vrna_alloc(n5 + end_q - begin_q + 2);
        catstruct = (char *)vrna_alloc(n5 + end_q - begin_q + 2);
        strcpy(catseq, s5);
        strncpy(catstruct, test.structure, end_t);
        strcat(catseq, s4);
        strncat(catstruct, test.structure + end_t + 1, end_q - begin_q + 1);
        catstruct[end_t - begin_t + end_q - begin_q + 2] = '\0';
        catseq[end_t - begin_t + end_q - begin_q + 2] = '\0';
        int *relative_access;
        relative_access = vrna_alloc(sizeof(int) * strlen(s5));
        relative_access[0] = access_s1[1][i - (n5 - test.i) + 5];
        for (k = 1; k < (int)strlen(s5); k++)
          relative_access[k] = access_s1[k + 1][i - (n5 - test.i) + k + 5] -
                               access_s1[k][i - (n5 - test.i) + k + 4];
        psoutput = vrna_strdup_printf("sno_XS_%d_u_%d_%s.ps",
                                      count,
                                      i - (n5 - test.u),
                                      name);
        PS_rna_plot_snoop_a(catseq, catstruct, psoutput, relative_access, NULL);
        free(catseq);
        free(catstruct);
        free(relative_access);
        free(psoutput);
        count++;
      }
      free(s3);
      free(s4);
      free(s5);
      free(test.structure);
    }
  }
  /* release everything kept alive for the scan */
  for (i = 0; i <= n3; i++) {
    free(c_fill[i]);
    free(r_fill[i]);
  }
  free(c_fill);
  free(r_fill);
  free(S1_fill);
  free(S2_fill);
  free(SS1_fill);
  free(SS2_fill);
  delay_free = 0;
}
/*
 * snoop_backtrack() - backtrack one duplex hit from the global c/r matrices,
 * going backwards from i (target) and forwards from j (snoRNA).
 *
 * Writes the energy decomposition into *Duplex_El/*Duplex_Er/*Loop_E/*Loop_D
 * (dcal/mol) and the position of the internal hairpin into *u.  Also refolds
 * the unbound snoRNA parts with duplexfold() to estimate *Loop_D.
 *
 * Returns a freshly allocated bracket structure "target&snoRNA" (caller
 * frees).  Fixes vs. the previous revision: struc_loop is NULL-initialized
 * (it was read and freed uninitialized when the backtrack never entered the
 * foldlist branch), and the c-matrix backtrack loop is bounded by k > 0 to
 * match the fill recursion and avoid negative indexing into S1.
 */
PRIVATE char *
snoop_backtrack(int i,
                int j,
                const char *snoseq,
                int *Duplex_El,
                int *Duplex_Er,
                int *Loop_E,
                int *Loop_D,
                int *u,
                const int penalty,
                const int threshloop,
                const int threshLE,
                const int threshRE,
                const int threshDE,
                const int threshD,
                const int half_stem,
                const int max_half_stem,
                const int min_s2,
                const int max_s2,
                const int min_s1,
                const int max_s1,
                const int min_d1,
                const int min_d2)
{
  /* backtrack structure going backwards from i, and forwards from j
   * return structure in bracket notation with & as separator */
  int k, l, type, type2, E, traced, i0, j0;
  int traced_r = 0;           /* flag for following backtrack in c or r */
  char *st1, *st2, *struc;
  /* FIX: NULL so the fallback '.' fill and the final free() are defined
   * even when the foldlist branch is never taken */
  char *struc_loop = NULL;
  st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
  st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  type = pair[S1[i]][S2[j]];
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  i0 = i;
  j0 = j;
  /* exterior-stem contribution at the right (closing) end of the duplex */
  *Duplex_Er += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
  while (i > 0 && j <= n2 - min_d2) {
    if (!traced_r) {
      /* backtrack in r: the duplex part that still contains the hairpin */
      E = r[i][j];
      traced = 0;
      st1[i - 1] = '<';
      st2[j - 1] = '>';
      type = pair[S1[i]][S2[j]];
      if (!type)
        vrna_message_error("backtrack failed in fold duplex r");
      for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) {
        for (l = j + 1; l <= n2; l++) {
          int LE;
          if (i - k + l - j > 2 * MAXLOOP_L - 2)
            break;
          if (abs(i - k - l + j) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
                         SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
          if (E == r[k][l] + LE + (i - k + l - j) * penalty) {
            traced = 1;
            i = k;
            j = l;
            *Duplex_Er += LE;
            break;
          }
        }
        if (traced)
          break;
      }
      if (!traced) {
        /* no interior-loop step matched: try crossing the internal hairpin */
        if (/* pair[S1[i+1]][S2[j-1]] && */
            j < max_s1 && j > min_s1 &&
            j > n2 - max_s2 - max_half_stem &&
            j < n2 - min_s2 - half_stem &&
            S1[i - 2] == 4) {
          int min_k, max_k; /* only used by the commented-out range guard */
          max_k = MIN2(n2 - min_s2, j + max_half_stem + 1);
          min_k = MAX2(j + half_stem + 1, n2 - max_s2);
          folden *temp;
          temp = foldlist[j + 1];
          while (temp->next) {
            int k = temp->k;
            if (pair[S1[i - 3]][S2[k + 1]] /*&& pair[S1[i-4]][S2[k+2]]*/) {
              /* introduce structure from RNAfold */
              if (E == c[i - 3][k + 1] + temp->energy) {
                *Loop_E = temp->energy;
                st1[i - 3] = '|';
                *u = i - 2;
                int a, b;
                /* int fix_ij=indx[k-1+1]+j+1; */
                /* locate the cLoop entry matching this energy (fuzzy over
                 * MISMATCH positions) and backtrack the hairpin structure */
                for (a = 0; a < MISMATCH; a++) {
                  for (b = 0; b < MISMATCH; b++) {
                    int ij = indx[k - 1 - a + 1] + j + 1 + b;
                    if (cLoop[ij] == temp->energy) {
                      struc_loop = snobacktrack_fold_from_pair(snoseq, j + 1 + b, k - a - 1 + 1);
                      a = INF;
                      b = INF;
                    }
                  }
                }
                traced = 1;
                traced_r = 1;
                i = i - 3;
                j = k + 1;
                break;
              }
            }
            /*else*/ if (pair[S1[i - 4]][S2[k + 1]] /*&& pair[S1[i-5]][S2[k+2]]*/) {
              /* introduce structure from RNAfold */
              if (E == c[i - 4][k + 1] + temp->energy) {
                *Loop_E = temp->energy;
                st1[i - 3] = '|';
                *u = i - 2;
                int a, b;
                /* int fix_ij=indx[k-1+1]+j+1; */
                for (a = 0; a < MISMATCH; a++) {
                  for (b = 0; b < MISMATCH; b++) {
                    int ij = indx[k - 1 - a + 1] + j + 1 + b;
                    if (cLoop[ij] == temp->energy) {
                      struc_loop = snobacktrack_fold_from_pair(snoseq, j + 1 + b, k - a - 1 + 1);
                      a = INF;
                      b = INF;
                    }
                  }
                }
                traced = 1;
                traced_r = 1;
                i = i - 4;
                j = k + 1;
                break;
              }
            } /* else if */
            temp = temp->next;
          } /* while temp-> next */
        } /* test on j */
      } /* traced? */
    } /* traced_r? */
    else {
      /* backtrack in c: the plain duplex part left of the hairpin */
      E = c[i][j];
      traced = 0;
      st1[i - 1] = '<';
      st2[j - 1] = '>';
      type = pair[S1[i]][S2[j]];
      if (!type)
        vrna_message_error("backtrack failed in fold duplex c");
      /* FIX: bound k > 0 like the fill recursion (prevents S1[k<=0] reads) */
      for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) {
        for (l = j + 1; l <= n2; l++) {
          int LE;
          if (i - k + l - j > 2 * MAXLOOP_L - 2)
            break;
          if (abs(i - k - l + j) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
                         SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
          if (E == c[k][l] + LE + (i - k + l - j) * penalty) {
            traced = 1;
            i = k;
            j = l;
            *Duplex_El += LE;
            break;
          }
        }
        if (traced)
          break;
      }
    }
    if (!traced) {
      /* should be at the duplex-initiation pair now */
      int correction;
      correction = vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P);
      E -= correction;
      *Duplex_El += correction;
      /**
       *** if (i>1) {E -= P->dangle5[type][SS1[i-1]]; *Duplex_El +=P->dangle5[type][SS1[i-1]];}
       *** if (j<n2) {E -= P->dangle3[type][SS2[j+1]]; *Duplex_El +=P->dangle3[type][SS2[j+1]];}
       *** if (type>2) {E -= P->TerminalAU; *Duplex_El +=P->TerminalAU;}
       **/
      if (E != P->DuplexInit)
        vrna_message_error("backtrack failed in fold duplex end");
      else
        break;
    }
  }
  /* assemble the bracket string: target part, '&', snoRNA part */
  struc = (char *)vrna_alloc(i0 - i + 1 + n2 - 1 + 1 + 2); /* declare final duplex structure */
  char *struc2;
  struc2 = (char *)vrna_alloc(n2 + 1);
  for (k = MAX2(i, 1); k <= i0; k++)
    if (!st1[k - 1])
      st1[k - 1] = '.';
  for (k = 1; k <= n2; k++) {
    if (!st2[k - 1])
      /* FIX: fall back to '.' when no hairpin structure was backtracked */
      st2[k - 1] = struc_loop ? struc_loop[k - 1] : '.';
    struc2[k - 1] = st2[k - 1];
  }
  /* refold the two unbound snoRNA arms against each other to estimate the
   * duplex-closing loop penalty Loop_D */
  char duplexseq_1[j0];
  char duplexseq_2[n2 - j + 2];
  if (j < n2) {
    strncpy(duplexseq_1, snoseq, j0 - 1);
    strcpy(duplexseq_2, snoseq + j);
    duplexseq_1[j0 - 1] = '\0';
    duplexseq_2[n2 - j + 1] = '\0';
    duplexT temp;
    temp = duplexfold(duplexseq_1, duplexseq_2);
    *Loop_D = MIN2(0, -410 + (int)100 * temp.energy);
    if (*Loop_D) {
      /* splice the arm-arm duplex brackets into the snoRNA structure */
      int l1, ibegin, iend, jbegin, jend;
      l1 = strchr(temp.structure, '&') - temp.structure;
      ibegin = temp.i - l1;
      iend = temp.i - 1;
      jbegin = temp.j;
      jend = temp.j + (int)strlen(temp.structure) - l1 - 2 - 1;
      for (k = ibegin + 1; k <= iend + 1; k++)
        struc2[k - 1] = temp.structure[k - ibegin - 1];
      for (k = jbegin + j; k <= jend + j; k++)
        struc2[k - 1] = temp.structure[l1 + k - j - jbegin + 1];
    }
    free(temp.structure);
  }
  strcpy(struc, st1 + MAX2(i - 1, 0));
  strcat(struc, "&");
  /* strip the 5-nt N flanks of the snoRNA part */
  strncat(struc, struc2 + 5, (int)strlen(struc2) - 10);
  free(struc2);
  free(struc_loop);
  free(st1);
  free(st2);
  /* free_arrays(); */
  return struc;
}
/*
 * Lsnoop_subopt_list_XS() - linear-time, accessibility-aware pre-scan.
 *
 * Slides a 5-row window (lc/lr/lpair, indexed i % 5) along the long
 * sequence s1, recording for every position i the best column score in
 * position[] and the j achieving it in position_j[].  When the overall
 * best score passes the threshold, find_max_snoop_XS() is called to refold
 * and report the hits.  access_s1[] supplies the target opening costs
 * (di1..di4, capped at 165 dcal/mol per opened position).
 *
 * Fix vs. the previous revision: min_j_colonne is initialized — it was
 * read into position_j[i] even for columns where no candidate ever
 * improved min_colonne (uninitialized read).
 */
void
Lsnoop_subopt_list_XS(const char *s1,
                      const char *s2,
                      const int **access_s1,
                      int delta,
                      int w,
                      const int penalty,
                      const int threshloop,
                      const int threshLE,
                      const int threshRE,
                      const int threshDE,
                      const int threshTE,
                      const int threshSE,
                      const int threshD,
                      const int distance,
                      const int half_stem,
                      const int max_half_stem,
                      const int min_s2,
                      const int max_s2,
                      const int min_s1,
                      const int max_s1,
                      const int min_d1,
                      const int min_d2,
                      const int alignment_length,
                      const char *name,
                      const int fullStemEnergy)
{
  int min_colonne = INF;
  int max_pos;          /* NOTE(review): recorded but never read here */
  int max;
  max = INF;
  n1 = (int)strlen(s1);
  n2 = (int)strlen(s2);
  int *position;
  int *position_j;
  /* FIX: initialize (was read uninitialized for empty columns) */
  int min_j_colonne = 0;
  int max_pos_j = INF;
  position = (int *)vrna_alloc((n1 + 3) * sizeof(int));
  position_j = (int *)vrna_alloc((n1 + 3) * sizeof(int));
  int i, j;
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  int Duplex_El, Duplex_Er;
  int Loop_D;
  int Loop_E;
  vrna_md_t md;
  Duplex_El = 0;
  Duplex_Er = 0;
  Loop_E = 0, Loop_D = 0;
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  set_model_details(&md);
  /* (re)build the energy parameter set if the temperature changed */
  if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
    snoupdate_fold_params();
    if (P)
      free(P);
    P = vrna_params(&md);
    make_pair_matrix();
  }
  /* rolling 5-row DP window over i (indices taken mod 5) */
  lpair = (int **)vrna_alloc(sizeof(int *) * (6));
  lc = (int **)vrna_alloc(sizeof(int *) * (6));
  lr = (int **)vrna_alloc(sizeof(int *) * (6));
  for (i = 0; i < 6; i++) {
    lc[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    lr[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    lpair[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    for (j = n2; j > -1; j--) {
      lc[i][j] = INF;
      lr[i][j] = INF;
      lpair[i][j] = 0;
    }
  }
  encode_seqs(s1, s2);
  int lim_maxj = n2 - min_d2;
  int lim_minj = min_d1;
  int lim_maxi = n1 - 5;
  for (i = 5; i <= lim_maxi; i++) {
    int idx = i % 5;
    int idx_1 = (i - 1) % 5;
    int idx_2 = (i - 2) % 5;
    int idx_3 = (i - 3) % 5;
    int idx_4 = (i - 4) % 5;
    /* di1..di4: cost of opening 1..4 extra target positions 5' of i,
     * capped at 165 dcal/mol per position */
    int di1, di2, di3, di4;
    di1 = access_s1[5][i] - access_s1[4][i - 1];
    di2 = access_s1[5][i - 1] - access_s1[4][i - 2] + di1;
    di3 = access_s1[5][i - 2] - access_s1[4][i - 3] + di2;
    di4 = access_s1[5][i - 3] - access_s1[4][i - 4] + di3;
    di1 = MIN2(di1, 165);
    di2 = MIN2(di2, 330);
    di3 = MIN2(di3, 495);
    di4 = MIN2(di4, 660);
    for (j = lim_maxj; j > lim_minj; j--) {
      int type, type2;/* E, k,l; */
      type = pair[S1[i]][S2[j]];
      lpair[idx][j] = type;
      lc[idx][j] = (type) ? P->DuplexInit + access_s1[1][i] : INF;
      lr[idx][j] = INF;
      if (!type)
        continue;
      /* internal hairpin crossing: U required at S1[i-2], j in stem window */
      if ( /*pair[S1[i+1]][S2[j-1]] && Be sure it binds*/
          j < max_s1 && j > min_s1 &&
          j > n2 - max_s2 - max_half_stem &&
          j < n2 - min_s2 - half_stem && S1[i - 2] == 4) {
        /*constraint on s2 and i*/
        int min_k, max_k; /* only used by the commented-out range guard */
        max_k = MIN2(n2 - min_s2, j + max_half_stem + 1);
        min_k = MAX2(j + half_stem + 1, n2 - max_s2);
        folden *temp;
        temp = foldlist[j + 1];
        while (temp->next) {
          int k = temp->k;
          /* if(k >= min_k-1 && k < max_k){ comment to recover normal behaviour */
          if (lpair[idx_3][k + 1] && lc[idx_3][k + 1] /*+di3*/ < 411 /*&& lpair[idx_4][k+2]*/) /* remove second condition */
            lr[idx][j] = MIN2(lr[idx][j], di3 + lc[idx_3][k + 1] + temp->energy); /*--NU--*/
          /*else*/ if (lpair[idx_4][k + 1] && /*di4 +*/ lc[idx_4][k + 1] < 411) /*--NUN--*/ /* remove second condition */
            lr[idx][j] = MIN2(lr[idx][j], di4 + lc[idx_4][k + 1] + temp->energy);
          /* } */
          temp = temp->next;
        }
      }
      /* dangle 5'SIDE relative to the mRNA */
      lc[idx][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P);
      /* extend by 0x0, 1x1 and 2x2 interior loops from the previous rows */
      type2 = lpair[idx_1][j + 1];
      if (type2 > 0) {
        lc[idx][j] =
          MIN2(lc[idx_1][j + 1] +
               E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1], P) + di1,
               lc[idx][j]);
        lr[idx][j] =
          MIN2(lr[idx_1][j + 1] +
               E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1], P) + di1,
               lr[idx][j]);
      }
      type2 = lpair[idx_2][j + 2];
      if (type2 > 0) {
        lc[idx][j] =
          MIN2(lc[idx_2][j + 2] +
               E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1],
                         P) + di2,
               lc[idx][j]);
        lr[idx][j] =
          MIN2(lr[idx_2][j + 2] +
               E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1],
                         P) + di2,
               lr[idx][j]);
      }
      type2 = lpair[idx_3][j + 3];
      if (type2 > 0) {
        lc[idx][j] =
          MIN2(lc[idx_3][j + 3] +
               E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1],
                         P) + di3,
               lc[idx][j]);
        lr[idx][j] =
          MIN2(lr[idx_3][j + 3] +
               E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1],
                         P) + di3,
               lr[idx][j]);
      }
      /* close with the exterior-stem bonus; track the column minimum and
       * the j achieving it */
      int bla;
      int temp2;
      temp2 = min_colonne;
      bla = lr[idx][j] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P);
      /**
       *** (type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]];
       **/
      min_colonne = MIN2(bla, min_colonne);
      if (temp2 > min_colonne)
        min_j_colonne = j;
    }
    position[i] = min_colonne;
    if (max >= min_colonne) {
      max = min_colonne;
      max_pos = i;
      max_pos_j = min_j_colonne;
    }
    position_j[i] = min_j_colonne;
    min_colonne = INF;
  }
  free(S1);
  free(S2);
  free(SS1);
  free(SS2);
  /* the threshold is relaxed by the maximal total accessibility cost */
  if (max < threshTE + 30 * alignment_length) {
    find_max_snoop_XS(s1,
                      s2,
                      access_s1,
                      max,
                      alignment_length,
                      position,
                      position_j,
                      delta,
                      distance,
                      penalty,
                      threshloop,
                      threshLE,
                      threshRE,
                      threshDE,
                      threshTE,
                      threshSE,
                      threshD,
                      half_stem,
                      max_half_stem,
                      min_s2,
                      max_s2,
                      min_s1,
                      max_s1,
                      min_d1,
                      min_d2,
                      name,
                      fullStemEnergy);
  }
  for (i = 1; i < 6; i++) {
    free(lc[i]);
    free(lr[i]);
    free(lpair[i]);
  }
  free(lc[0]);
  free(lr[0]);
  free(lpair[0]);
  free(lc);
  free(lr);
  free(lpair);
  free(position);
  free(position_j);
}
/*
 * Scan the target positions scored by the caller's DP pass (position[] /
 * position_j[]) for local energy minima below a threshold, re-fold each
 * candidate window with snoopfold_XS(), filter the hit against the
 * per-component energy thresholds, print it, and optionally emit a
 * PostScript structure plot when `name` is given.
 *
 * NOTE(review): `pos` is initialized from the file-global n1 (not the
 * local n3/n4 lengths) — assumes the globals still describe s1/s2 from
 * the preceding DP pass; confirm against the caller.
 */
PRIVATE void
find_max_snoop_XS(const char *s1,
                  const char *s2,
                  const int **access_s1,
                  const int max,
                  const int alignment_length,
                  const int *position,
                  const int *position_j,
                  const int delta,
                  const int distance,
                  const int penalty,
                  const int threshloop,
                  const int threshLE,
                  const int threshRE,
                  const int threshDE,
                  const int threshTE,
                  const int threshSE,
                  const int threshD,
                  const int half_stem,
                  const int max_half_stem,
                  const int min_s2,
                  const int max_s2,
                  const int min_s1,
                  const int max_s1,
                  const int min_d1,
                  const int min_d2,
                  const char *name,
                  const int fullStemEnergy)
{
  int count = 0;                  /* running index used in the PS file name */
  int n3 = (int)strlen(s1);
  int n4 = (int)strlen(s2);
  int pos = n1 - 4;               /* start near the 3' end of the target */
  int max_pos_j;
  /* acceptance threshold for a raw DP score at a position */
  int threshold = MIN2(threshTE + alignment_length * 30, -100);
  /* printf("threshTE %d max %d\n", threshTE, max); */
  /* #pragma omp parallel for */
  /* for(pos=n1+1;pos>distance;pos--){ */
  while (pos-- > 5) {
    int temp_min = 0;
    if (position[pos] < (threshold)) {
      /* pick the best score within `distance` positions upstream, then
       * jump there so overlapping hits are reported only once */
      int search_range;
      search_range = distance + 1;
      while (--search_range)
        if (position[pos - search_range] <= position[pos - temp_min])
          temp_min = search_range;
      pos -= temp_min;
      max_pos_j = position_j[pos];
      /* cut a window of the target around the hit, padded with Ns so the
       * refolding step has slack at the 3' side */
      int begin = MAX2(5, pos - alignment_length);
      int end = MIN2(n3 - 5, pos - 1);
      char *s3 = (char *)vrna_alloc(sizeof(char) * (end - begin + 2) + 5);
      strncpy(s3, (s1 + begin), end - begin + 1);
      strcat(s3, "NNNNN\0");      /* the explicit \0 in the literal is redundant */
      int n5 = (int)strlen(s3);
      snoopT test;
      test = snoopfold_XS(s3, s2, access_s1, pos, max_pos_j, penalty,
                          threshloop, threshLE, threshRE,
                          threshDE, threshD, half_stem,
                          max_half_stem, min_s2, max_s2, min_s1,
                          max_s1, min_d1, min_d2, fullStemEnergy);
      if (test.energy == INF) {
        /* refolding found nothing under the constraints */
        free(s3);
        continue;
      }
      /* thresholds are in dekacal (x0.01 converts to kcal/mol floats) */
      if (test.Duplex_El > threshLE * 0.01 || test.Duplex_Er > threshRE * 0.01 ||
          test.Loop_D > threshD * 0.01 || (test.Duplex_Er + test.Duplex_El) > threshDE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E) > threshTE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E + test.Loop_D + 410) > threshSE * 0.01) {
        free(test.structure);
        free(s3);
        continue;
      }
      /* s4: snoRNA without its 5-nt padding on each side */
      char *s4;
      s4 = (char *)vrna_alloc(sizeof(char) * (n4 - 9));
      strncpy(s4, s2 + 5, n4 - 10);
      s4[n4 - 10] = '\0';
      /* s5: interacting part of the target window (padding stripped) */
      char *s5 = vrna_alloc(sizeof(char) * n5 - test.i + 2 - 5);
      strncpy(s5, s3 + test.i - 1, n5 - test.i + 1 - 5);
      s5[n5 - test.i + 1 - 5] = '\0';
      /* opening energy of the accessed stretch, in kcal/mol */
      float dE = ((float)(access_s1[n5 - test.i + 1 - 5][pos])) * 0.01;
      printf(
        "%s %3d,%-3d;%3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f + %5.2f + %5.2f + 4.10) (%5.2f)\n%s&%s\n",
        test.structure,
        pos - (n5 - test.i),
        pos - 5,
        pos - (n5 - test.u),
        max_pos_j - 5,
        max_pos_j - 5 + (int)(strrchr(test.structure, '>') - strchr(test.structure, '>')),
        test.Loop_D + test.Duplex_El + test.Duplex_Er + test.Loop_E + 4.10 + dE,
        test.Duplex_El,
        test.Duplex_Er,
        test.Loop_E,
        test.Loop_D,
        dE,
        test.fullStemEnergy,
        s5,
        s4);
      if (name) {
        /* build concatenated sequence/structure strings and plot them;
         * NOTE(review): `and` and `pipe` are computed but never used */
        int begin_t, end_t, begin_q, end_q, and, pipe, i;
        char *psoutput;
        begin_q = 0;
        end_q = n4 - 10;
        begin_t = 0;
        end_t = n5 - test.i + 1 - 5;
        and = end_t + 1;
        pipe = test.u - test.i + 1;
        cut_point = end_t + 1;  /* file-global consumed by the plot routine */
        char *catseq, *catstruct;/* *fname; */
        catseq = (char *)vrna_alloc(n5 + end_q - begin_q + 2);
        catstruct = (char *)vrna_alloc(n5 + end_q - begin_q + 2);
        strcpy(catseq, s5);
        strncpy(catstruct, test.structure, end_t);
        strcat(catseq, s4);
        strncat(catstruct, test.structure + end_t + 1, end_q - begin_q + 1);
        catstruct[end_t - begin_t + end_q - begin_q + 2] = '\0';
        catseq[end_t - begin_t + end_q - begin_q + 2] = '\0';
        /* per-position opening-energy differences for the plotted stretch */
        int *relative_access;
        relative_access = vrna_alloc(sizeof(int) * strlen(s5));
        relative_access[0] = access_s1[1][pos - (n5 - test.i) + 5];
        for (i = 1; i < (int)strlen(s5); i++)
          relative_access[i] = access_s1[i + 1][pos - (n5 - test.i) + i + 5] -
                               access_s1[i][pos - (n5 - test.i) + i + 4];
        psoutput = vrna_strdup_printf("sno_XS_%d_u_%d_%s.ps",
                                      count,
                                      pos - (n5 - test.u),
                                      name);
        PS_rna_plot_snoop_a(catseq, catstruct, psoutput, relative_access, NULL);
        free(catseq);
        free(catstruct);
        free(relative_access);
        free(psoutput);
        count++;
      }
      free(s3);
      free(s4);
      free(s5);
      free(test.structure);
    }
  }
}
/*
 * Refold one candidate target window (s1) against the snoRNA (s2) with
 * accessibility profiles (access_s1), filling the global DP matrices
 * c (duplex closed by the kink-turn branch) and r (plain duplex), then
 * backtrack the minimum-energy interaction.
 *
 * Returns a snoopT whose energy is INF when no interaction under the
 * constraints scores <= 0; in that case only .energy is meaningful
 * (the other fields are left uninitialized — callers must check
 * energy == INF first).
 *
 * Side effects: (re)sets file globals n1, n2, P, S1/S2/SS1/SS2, c, r;
 * the c/r/S*/SS* arrays are freed either here (failure path) or by
 * snoop_backtrack_XS() on success.
 */
snoopT
snoopfold_XS(const char *s1,
             const char *s2,
             const int **access_s1,
             const int pos_i,
             const int pos_j,
             const int penalty,
             const int threshloop,
             const int threshLE,
             const int threshRE,
             const int threshDE,
             const int threshD,
             const int half_stem,
             const int max_half_stem,
             const int min_s2,
             const int max_s2,
             const int min_s1,
             const int max_s1,
             const int min_d1,
             const int min_d2,
             const int fullStemEnergy)
{
  /* int Eminj, Emin_l; */
  int a, b, i, j, Emin = INF, a_min = 0, b_min = 0;
  char *struc;
  snoopT mfe;
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  int Duplex_El, Duplex_Er;
  int Loop_D;
  int u;
  int Loop_E;
  vrna_md_t md;

  Duplex_El = 0;
  Duplex_Er = 0;
  Loop_E = 0, Loop_D = 0;
  /* fetch the snoRNA-side folding arrays computed earlier */
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  n1 = (int)strlen(s1);
  n2 = (int)strlen(s2);
  set_model_details(&md);
  /* (re)build the energy parameter set if the temperature changed */
  if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
    snoupdate_fold_params();
    if (P)
      free(P);
    P = vrna_params(&md);
    make_pair_matrix();
  }
  /* allocate and INF-initialize the two DP matrices */
  c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
  r = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
  for (i = 0; i <= n1; i++) {
    c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    r[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    for (j = n2; j > -1; j--) {
      c[i][j] = INF;
      r[i][j] = INF;
    }
  }
  encode_seqs(s1, s2);
  /* anchor the duplex at the 3' side of the padded target window */
  i = n1 - 5;
  j = pos_j;
  /* printf("tar: %s\nsno: %s\n ", s1, s2); */
  /* printf("pos_i %d pos_j %d\n", pos_i, pos_j); */
  /* printf("type %d n1 %d n2 %d S1[n1] %d S2[n2] %d", pair[S1[i]][S2[j]], n1, n2, S1[n1], S2[n2]); */
  int type, type2, E, p, q;
  r[i][j] = P->DuplexInit;
  /* r[i][j] += P->dangle3[rtype[type]][SS1[i+1]] + P->dangle5[rtype[type]][SS2[j-1]]; */
  if (pair[S1[i]][S2[j]] > 2)
    r[i][j] += P->TerminalAU;
  /* extend the duplex backwards on the target, forwards on the snoRNA */
  for (a = i - 1; a > 0; a--) {
    /* i-1 */
    r[a + 1][0] = INF;
    for (b = j + 1; b <= n2 - min_d2; b++) {
      /* j+1 */
      r[a][b] = INF;
      type = pair[S1[a]][S2[b]];
      if (!type)
        continue;
      /* an unpaired U one position in (S1[a+1]==4): try closing through a
       * precomputed snoRNA stem from foldlist_XS, subject to the stem
       * position constraints */
      if (S1[a + 1] == 4) {
        folden *temp;
        temp = foldlist_XS[b - 1];
        while (temp->next) {
          int k = temp->k;
          if (pair[S1[a + 3]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
              k > n2 - max_s2 - max_half_stem &&
              k < n2 - min_s2 - half_stem /*&& r[a+3][k-1] + access_s1[i-(a+3)+1][pos_i] < 411*/) /* remove last condition last condition is to check if the interaction is stable enough */
            c[a][b] = MIN2(c[a][b], r[a + 3][k - 1] + temp->energy);
          temp = temp->next;
        }
      }
      /* same, with the U two positions in */
      if (S1[a + 2] == 4) {
        folden *temp;
        temp = foldlist_XS[b - 1];
        while (temp->next) {
          int k = temp->k;
          if (pair[S1[a + 4]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
              k > n2 - max_s2 - max_half_stem &&
              k < n2 - min_s2 - half_stem /*&& r[a+4][k-1] + access_s1[i-(a+4)+1][pos_i] < 411 */) /* remove last condition */
            c[a][b] = MIN2(c[a][b], r[a + 4][k - 1] + temp->energy);
          temp = temp->next;
        }
      }
      /* interior-loop extension of both matrices */
      for (p = a + 1; p < n1 && (p - a) < MAXLOOP_L; p++) {
        /* p < n1 */
        for (q = b - 1; q > 1; q--) {
          /* q > 1 */
          if (p - a + b - q > 2 * MAXLOOP_L - 2)
            break;
          if (abs((p - a) - (b - q)) >= ASS)
            continue;
          type2 = pair[S1[p]][S2[q]];
          if (!type2)
            continue;
          E =
            E_IntLoop(p - a - 1,
                      b - q - 1,
                      type2,
                      rtype[type],
                      SS1[a + 1],
                      SS2[b - 1],
                      SS1[p - 1],
                      SS2[q + 1],
                      P);
          c[a][b] = MIN2(c[a][b], c[p][q] + E);
          r[a][b] = MIN2(r[a][b], r[p][q] + E);
        }
      }
      /* score this closing pair, adding the target opening energy */
      E = c[a][b];
      if (type > 2)
        E += P->TerminalAU;
      /* E +=P->dangle5[rtype[type]][SS1[i+1]]; */
      /* E +=P->dangle5[rtype[type]][SS2[j-1]]; */
      E += access_s1[i - a + 1][pos_i];
      if (E < Emin) {
        Emin = E;
        a_min = a;
        b_min = b;
      }
    }
  }
  if (Emin > 0) {
    /* nothing stable found: free everything this call allocated */
    printf("no target found under the constraints chosen\n");
    for (i = 0; i <= n1; i++) {
      free(r[i]);
      free(c[i]);
    }
    free(c);
    free(r);
    free(S1);
    free(S2);
    free(SS1);
    free(SS2);
    mfe.energy = INF;
    return mfe;
  }
  type2 = pair[S1[a_min]][S2[b_min]];
  if (type2 > 2)
    Emin += P->TerminalAU;
  mfe.energy = ((float)(Emin)) / 100;
  /* backtrack also fills the per-component energies and frees c/r/S*  */
  struc = snoop_backtrack_XS(a_min,
                             b_min,
                             s2,
                             &Duplex_El,
                             &Duplex_Er,
                             &Loop_E,
                             &Loop_D,
                             &u,
                             penalty,
                             threshloop,
                             threshLE,
                             threshRE,
                             threshDE,
                             threshD,
                             half_stem,
                             max_half_stem,
                             min_s2,
                             max_s2,
                             min_s1,
                             max_s1,
                             min_d1,
                             min_d2);
  mfe.i = a_min;
  mfe.j = b_min;
  mfe.u = u;
  /* convert dekacal integers to kcal/mol floats */
  mfe.Duplex_Er = (float)Duplex_Er / 100;
  mfe.Duplex_El = (float)Duplex_El / 100;
  mfe.Loop_D = (float)Loop_D / 100;
  mfe.Loop_E = (float)Loop_E / 100;
  mfe.energy = (float)Emin / 100;
  mfe.fullStemEnergy = (float)fullStemEnergy / 100;
  mfe.structure = struc;
  return mfe;
}
/*
 * Backtrack the interaction found by snoopfold_XS() through the global
 * matrices c and r, starting from the optimal closing pair (i, j).
 * Accumulates the left/right duplex energies, the snoRNA loop energy
 * and the duplex-of-the-remainder energy into the output parameters,
 * and returns the bracket structure ("<"/"|" target & snoRNA part) as
 * a freshly allocated string.
 *
 * Side effects: frees the global c, r, S1, S2, SS1, SS2 arrays — they
 * must not be used after this call.
 *
 * NOTE(review): struc_loop is only assigned inside the kink-turn
 * branches; if the backtrack never enters them (traced_c stays 0),
 * the loop over st2 below reads an uninitialized pointer — presumably
 * the c-matrix optimum guarantees a kink-turn branch is taken; confirm.
 */
PRIVATE char *
snoop_backtrack_XS(int i,
                   int j,
                   const char *snoseq,
                   int *Duplex_El,
                   int *Duplex_Er,
                   int *Loop_E,
                   int *Loop_D,
                   int *u,
                   const int penalty,
                   const int threshloop,
                   const int threshLE,
                   const int threshRE,
                   const int threshDE,
                   const int threshD,
                   const int half_stem,
                   const int max_half_stem,
                   const int min_s2,
                   const int max_s2,
                   const int min_s1,
                   const int max_s1,
                   const int min_d1,
                   const int min_d2)
{
  /* backtrack structure going backwards from i, and forwards from j
   * return structure in bracket notation with & as separator */
  int k, l, type, type2, E, traced, i0, j0;
  int traced_c = 0; /* flag for following backtrack in c or r */
  char *st1, *st2, *struc;
  char *struc_loop;
  st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
  st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  type = pair[S1[i]][S2[j]];
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  i0 = i;
  j0 = j;
  /* i0=MAX2(i,1); j0=MIN2(j+1,n2); */
  while (i <= n1 && j >= 1) {
    if (!traced_c) {
      /* still in the c matrix: look for the interior loop that produced
       * this entry, crediting its energy to the left duplex part */
      E = c[i][j];
      traced = 0;
      st1[i] = '<';
      st2[j - 1] = '>';
      type = pair[S1[i]][S2[j]];
      if (!type)
        vrna_message_error("backtrack failed in fold duplex c");
      for (k = i + 1; k > 0 && (k - i) < MAXLOOP_L; k++) {
        for (l = j - 1; l >= 1; l--) {
          int LE;
          if (k - i + j - l > 2 * MAXLOOP_L - 2)
            break;
          if (abs(k - i - j + l) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          LE = E_IntLoop(k - i - 1, j - l - 1, type2, rtype[type],
                         SS1[i + 1], SS2[j - 1], SS1[k - 1], SS2[l + 1], P);
          if (E == c[k][l] + LE) {
            traced = 1;
            i = k;
            j = l;
            *Duplex_El += LE;
            break;
          }
        }
        if (traced)
          break;
      }
      if (!traced) {
        /* not an interior loop: this c entry came from a kink-turn
         * (unpaired U + precomputed snoRNA stem); retrace that stem
         * and switch the rest of the backtrack to the r matrix */
        if (S1[i + 1] == 4) {
          folden *temp;
          temp = foldlist_XS[j - 1];
          while (temp->next) {
            int k = temp->k;
            if (pair[S1[i + 3]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
                k > n2 - max_s2 - max_half_stem && k < n2 - min_s2 - half_stem) {
              if (E == r[i + 3][k - 1] + temp->energy) {
                *Loop_E = temp->energy;
                st1[i + 1] = '|';
                st1[i + 2] = '.';
                *u = i + 1;
                /* locate the exact stem closing pair that matches the
                 * recorded energy, then fold its structure */
                int a, b;
                for (a = 0; a < MISMATCH; a++) {
                  for (b = 0; b < MISMATCH; b++) {
                    int ij = indx[j - 1 - a] + k + b;
                    if (cLoop[ij] == temp->energy) {
                      struc_loop = snobacktrack_fold_from_pair(snoseq, k + b, j - 1 - a);
                      a = INF;  /* break out of both loops */
                      b = INF;
                    }
                  }
                }
                traced = 1;
                traced_c = 1;
                i = i + 3;
                j = k - 1;
                break;
              }
            }
            temp = temp->next;
          }
        }
        if (S1[i + 2] == 4) {
          /* introduce structure from RNAfold */
          folden *temp;
          temp = foldlist_XS[j - 1];
          while (temp->next) {
            int k = temp->k;
            if (pair[S1[i + 4]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
                k > n2 - max_s2 - max_half_stem && k < n2 - min_s2 - half_stem) {
              if (E == r[i + 4][k - 1] + temp->energy) {
                *Loop_E = temp->energy;
                st1[i + 2] = '|';
                st1[i + 1] = st1[i + 3] = '.';
                *u = i + 2;
                int a, b;
                for (a = 0; a < MISMATCH; a++) {
                  for (b = 0; b < MISMATCH; b++) {
                    int ij = indx[j - 1 - a] + k + b;
                    if (cLoop[ij] == temp->energy) {
                      struc_loop = snobacktrack_fold_from_pair(snoseq, k + b, j - a - 1);
                      a = INF;
                      b = INF;
                    }
                  }
                }
                traced = 1;
                traced_c = 1;
                i = i + 4;
                j = k - 1;
                break;
              }
            }
            temp = temp->next;
          }
        }
      } /* traced? */
    } /* traced_r? */
    else {
      /* past the kink-turn: retrace the r matrix, crediting interior
       * loops to the right duplex part */
      E = r[i][j];
      traced = 0;
      st1[i] = '<';
      st2[j - 1] = '>';
      type = pair[S1[i]][S2[j]];
      if (!type)
        vrna_message_error("backtrack failed in fold duplex r");
      for (k = i + 1; k > 0 && (k - i) < MAXLOOP_L; k++) {
        for (l = j - 1; l >= 1; l--) {
          int LE;
          if (k - i + j - l > 2 * MAXLOOP_L - 2)
            break;
          if (abs(k - i - j + l) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          LE = E_IntLoop(k - i - 1, j - l - 1, type2, rtype[type],
                         SS1[i + 1], SS2[j - 1], SS1[k - 1], SS2[l + 1], P);
          if (E == r[k][l] + LE) {
            traced = 1;
            i = k;
            j = l;
            *Duplex_Er += LE;
            break;
          }
        }
        if (traced)
          break;
      }
    }
    if (!traced) {
      /* no predecessor found: we must be at the duplex initiation */
      /* if (i>1) {E -= P->dangle5[type][SS1[i-1]]; *Duplex_El +=P->dangle5[type][SS1[i-1]];} */
      /* if (j<n2) {E -= P->dangle3[type][SS2[j+1]]; *Duplex_El +=P->dangle3[type][SS2[j+1]];} */
      if (type > 2) {
        E -= P->TerminalAU;
        *Duplex_Er += P->TerminalAU;
      }
      if (E != P->DuplexInit)
        vrna_message_error("backtrack failed in fold duplex end");
      else
        break;
    }
  }
  /* assemble the final "<target>&<snoRNA>" bracket string */
  /* struc = (char *) vrna_alloc(i0-i+1+j-j0+1+2); */ /* declare final duplex structure */
  struc = (char *)vrna_alloc(i - i0 + 1 + n2); /* declare final duplex structure */
  char *struc2;
  struc2 = (char *)vrna_alloc(n2 + 1);
  /* char * struct_const; */
  for (k = MIN2(i0, 1); k <= i; k++)
    if (!st1[k - 1])
      st1[k - 1] = '.';
  /* for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = struc_loop[k-1];*/ /* '.'; normal */
  /* char * struct_const; */
  /* struct_const = (char *) vrna_alloc(sizeof(char)*(n2+1)); */
  /* merge the duplex brackets with the snoRNA stem structure */
  for (k = 1; k <= n2; k++) {
    if (!st2[k - 1])
      st2[k - 1] = struc_loop[k - 1]; /* '.'; */
    struc2[k - 1] = st2[k - 1]; /* '.'; */
    /* if (k>=j0 && k<=j){ */
    /* struct_const[k-1]='x'; */
    /* } */
    /* else{ */
    /* if(k<j0) {struct_const[k-1]='<';} */
    /* if(k>j) {struct_const[k-1]='>';} */
    /* } */
  }
  /* fold the snoRNA pieces flanking the duplex against each other and
   * overlay a stabilizing duplex (if any) onto struc2 */
  char duplexseq_1[j];             /* VLA: 5' remainder of the snoRNA */
  char duplexseq_2[n2 - j0 + 2];   /* VLA: 3' remainder */
  if (j0 < n2) {
    strncpy(duplexseq_1, snoseq, j - 1);
    strcpy(duplexseq_2, snoseq + j0);
    duplexseq_1[j - 1] = '\0';
    duplexseq_2[n2 - j0 + 1] = '\0';
    duplexT temp;
    temp = duplexfold(duplexseq_1, duplexseq_2);
    /* only count the remainder duplex when it beats the 4.10 kcal bonus;
     * note (int)100 casts the constant, the product is still float */
    *Loop_D = MIN2(0, -410 + (int)100 * temp.energy);
    if (*Loop_D) {
      int l1, ibegin, iend, jbegin, jend;
      l1 = strchr(temp.structure, '&') - temp.structure;
      ibegin = temp.i - l1;
      iend = temp.i - 1;
      jbegin = temp.j;
      jend = temp.j + (int)strlen(temp.structure) - l1 - 2 - 1;
      for (k = ibegin + 1; k <= iend + 1; k++)
        struc2[k - 1] = temp.structure[k - ibegin - 1];
      for (k = jbegin + j0; k <= jend + j0; k++)
        struc2[k - 1] = temp.structure[l1 + k - j0 - jbegin + 1];
    }
    free(temp.structure);
  }
  strcpy(struc, st1 + MAX2(i0, 1));
  strcat(struc, "&");
  /* strcat(struc, st2); */
  /* strip the 5-nt padding from both ends of the snoRNA part */
  strncat(struc, struc2 + 5, (int)strlen(struc2) - 10);
  free(struc2);
  free(struc_loop);
  free(st1);
  free(st2);
  /* release the DP matrices and encodings owned by snoopfold_XS() */
  for (i = 0; i <= n1; i++) {
    free(r[i]);
    free(c[i]);
  }
  free(c);
  free(r);
  free(S1);
  free(S2);
  free(SS1);
  free(SS2);
  /* free_arrays(); */
  return struc;
}
/*
 * Co-variance bonus for one alignment column pair.
 * 0 for perfectly conserved pairs, positive when consistent or
 * compensatory mutations support the pair; NONE when more than half the
 * sequences cannot form a pair at all. Counter-examples (non-pairing
 * sequences) and gap-gap columns are penalized via nc_fact.
 */
PRIVATE int
covscore(const int *types,
         int n_seq)
{
#define NONE -10000 /* score for forbidden pairs */
  /* hamming distance between the six canonical pair types (1..6);
   * index 0 / 7 rows stay zero */
  int dm[7][7] = { { 0, 0, 0, 0, 0, 0, 0 },
                   { 0, 0, 2, 2, 1, 2, 2 } /* CG */,
                   { 0, 2, 0, 1, 2, 2, 2 } /* GC */,
                   { 0, 2, 1, 0, 2, 1, 2 } /* GU */,
                   { 0, 1, 2, 2, 0, 2, 1 } /* UG */,
                   { 0, 2, 2, 1, 2, 0, 2 } /* AU */,
                   { 0, 2, 2, 2, 1, 2, 0 } /* UA */ };
  int freq[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
  int s;

  /* histogram of pair types over the sequences */
  for (s = 0; s < n_seq; s++)
    freq[types[s]]++;

  /* forbidden when non-pairing sequences dominate */
  if (freq[0] * 2 > n_seq)
    return NONE;

  /* consistent or compensatory mutations score 1 or 2;
   * pairtype 7 (gap-gap) is ignored here */
  int sc = 0;
  int row, col;
  for (row = 1; row <= 6; row++)
    for (col = row + 1; col <= 6; col++)
      sc += freq[row] * freq[col] * dm[row][col];

  /* counter examples score -1, gap-gap scores -0.25 */
  return cv_fact *
         ((UNIT * sc) / n_seq - nc_fact * UNIT * (freq[0] + freq[7] * 0.25));
}
/*---------------------------------------------------------------------------*/
/*
 * Numerically encode one alignment sequence: slot 0 holds the length,
 * slots 1..len hold encode_char() codes of the uppercased characters.
 * Unlike encode_seq(), no wrap-around base is stored at len+1
 * (circular folding is not used here).
 */
PRIVATE short *
aliencode_seq(const char *sequence)
{
  unsigned int len = strlen(sequence);
  short *enc = (short *)vrna_alloc(sizeof(short) * (len + 2));

  enc[0] = (short)len;
  for (unsigned int pos = 1; pos <= len; pos++)
    enc[pos] = (short)encode_char(toupper(sequence[pos - 1]));

  return enc;
}
/*
 * Numerically encode a sequence for folding: S[0] holds the length,
 * S[1..l] hold encode_char() codes of the uppercased bases, and
 * S[l+1] duplicates S[1] so circular folding can read one base past
 * the end. Caller owns (and frees) the returned array.
 *
 * Fix: removed a stray `extern double nc_fact;` declaration that was
 * unrelated to encoding and never used in this function.
 */
PRIVATE short *
encode_seq(const char *sequence)
{
  unsigned int i, l;
  short *S;

  l = strlen(sequence);
  S = (short *)vrna_alloc(sizeof(short) * (l + 2));
  S[0] = (short)l;
  /* make numerical encoding of sequence */
  for (i = 1; i <= l; i++)
    S[i] = (short)encode_char(toupper(sequence[i - 1]));
  /* for circular folding add first base at position n+1;
   * for an empty sequence this copies S[1] onto itself */
  S[l + 1] = S[1];
  return S;
}
/*
 * Encode both interaction partners into the file globals S1/SS1 (for s1)
 * and S2/SS2 (for s2). SS1/SS2 map the raw codes through alias[] so
 * nonstandard bases (X, K, I) score as their standard stand-ins.
 * NOTE(review): any previously stored S1/S2/SS1/SS2 arrays are
 * overwritten without being freed — callers appear to free them after
 * each fold; confirm before reuse.
 */
PRIVATE void
encode_seqs(const char *s1,
            const char *s2)
{
  unsigned int i, l;
  l = strlen(s1);
  S1 = encode_seq(s1);
  SS1 = (short *)vrna_alloc(sizeof(short) * (l + 1));
  /* SS1 exists only for the special X K and I bases and energy_set!=0 */
  for (i = 1; i <= l; i++) /* make numerical encoding of sequence */
    SS1[i] = alias[S1[i]]; /* for mismatches of nonstandard bases */
  l = strlen(s2);
  S2 = encode_seq(s2);
  SS2 = (short *)vrna_alloc(sizeof(short) * (l + 1));
  /* SS2 exists only for the special X K and I bases and energy_set!=0 */
  for (i = 1; i <= l; i++) /* make numerical encoding of sequence */
    SS2[i] = alias[S2[i]]; /* for mismatches of nonstandard bases */
}
PRIVATE int
compare(const void *sub1,
const void *sub2)
{
int d;
if (((snoopT *)sub1)->energy > ((snoopT *)sub2)->energy)
return 1;
if (((snoopT *)sub1)->energy < ((snoopT *)sub2)->energy)
return -1;
d = ((snoopT *)sub1)->i - ((snoopT *)sub2)->i;
if (d != 0)
return d;
return ((snoopT *)sub1)->j - ((snoopT *)sub2)->j;
}
|
multi_bspline_create.c | /////////////////////////////////////////////////////////////////////////////
// einspline: a library for creating and evaluating B-splines //
// Copyright (C) 2007 Kenneth P. Esler, Jr. //
// //
// This program is free software; you can redistribute it and/or modify //
// it under the terms of the GNU General Public License as published by //
// the Free Software Foundation; either version 2 of the License, or //
// (at your option) any later version. //
// //
// This program is distributed in the hope that it will be useful, //
// but WITHOUT ANY WARRANTY; without even the implied warranty of //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
// GNU General Public License for more details. //
// //
// You should have received a copy of the GNU General Public License //
// along with this program; if not, write to the Free Software //
// Foundation, Inc., 51 Franklin Street, Fifth Floor, //
// Boston, MA 02110-1301 USA //
/////////////////////////////////////////////////////////////////////////////
#include "multi_bspline_create.h"
#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 600
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
int posix_memalign(void **memptr, size_t alignment, size_t size);
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Helper functions for spline creation ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void init_sse_data();
void
find_coefs_1d_d (Ugrid grid, BCtype_d bc,
double *data, intptr_t dstride,
double *coefs, intptr_t cstride);
void
solve_deriv_interp_1d_s (float bands[], float coefs[],
int M, int cstride);
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs with contain interpolating B-spline coefs
void
solve_periodic_interp_1d_s (float bands[], float coefs[],
int M, int cstride);
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs with contain interpolating B-spline coefs
void
solve_antiperiodic_interp_1d_s (float bands[], float coefs[],
int M, int cstride);
void
find_coefs_1d_s (Ugrid grid, BCtype_s bc,
float *data, intptr_t dstride,
float *coefs, intptr_t cstride);
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Single-Precision, Real Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs with contain interpolating B-spline coefs
// Allocate and initialize a 1-D single-precision real multi-spline:
// sets up the grid (periodic-type boundaries need Mx+3 coefficient rows,
// others Mx+2) and allocates the interleaved coefficient array.
// Aborts on allocation failure. Caller fills coefficients via
// set_multi_UBspline_1d_s.
//
// Fix: the posix_memalign return value was ignored; on failure it leaves
// *memptr unmodified, so spline->coefs stayed indeterminate and the NULL
// check below was unreliable. We now set coefs to NULL on failure.
multi_UBspline_1d_s*
create_multi_UBspline_1d_s (Ugrid x_grid, BCtype_s xBC, int num_splines)
{
  // Create new spline
  multi_UBspline_1d_s* restrict spline = malloc (sizeof(multi_UBspline_1d_s));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_1d_s.\n");
    abort();
  }
  spline->spcode = MULTI_U1D;
  spline->tcode  = SINGLE_REAL;
  spline->xBC = xBC; spline->x_grid = x_grid;
  spline->num_splines = num_splines;

  // Setup internal variables
  int Mx = x_grid.num;
  int Nx;

  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
    x_grid.delta     = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    Nx = Mx+3;
  }
  else {
    x_grid.delta     = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    Nx = Mx+2;
  }

  int N = num_splines;
#ifdef HAVE_SSE
  // Pad the spline count to a multiple of 4 so each coefficient row is
  // 16-byte aligned for SSE loads.
  if (N % 4)
    N += 4 - (N % 4);
#endif
  spline->x_stride = N;
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (sizeof(float)*Nx*N);
#else
  // posix_memalign leaves *memptr unmodified on failure; make failure
  // observable to the NULL check below.
  if (posix_memalign ((void**)&spline->coefs, 64, (sizeof(float)*Nx*N)) != 0)
    spline->coefs = NULL;
#endif
  spline->coefs_size=(size_t)Nx*(size_t)N;
#ifdef HAVE_SSE
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficient in create_multi_UBspline_1d_s.\n");
    abort();
  }
  return spline;
}
// Compute the B-spline coefficients of spline number `num` from `data`
// (x_grid.num contiguous samples); results are written interleaved into
// the shared coefficient array at stride x_stride.
void
set_multi_UBspline_1d_s (multi_UBspline_1d_s *spline, int num,
			 float *data)
{
  float *dest = spline->coefs + num;

  find_coefs_1d_s (spline->x_grid, spline->xBC,
                   data, 1,
                   dest, spline->x_stride);
}
// Allocate and initialize a 2-D single-precision real multi-spline:
// per-dimension grids (periodic-type boundaries need M+3 coefficient
// rows, others M+2) and the interleaved coefficient array. Aborts on
// allocation failure.
//
// Fixes: (1) spline->coefs_size was never set here, unlike the 1d/3d
// creators — now recorded consistently; (2) the posix_memalign return
// value was ignored, leaving spline->coefs indeterminate on failure and
// defeating the NULL check below.
multi_UBspline_2d_s*
create_multi_UBspline_2d_s (Ugrid x_grid, Ugrid y_grid,
			    BCtype_s xBC, BCtype_s yBC, int num_splines)
{
  // Create new spline
  multi_UBspline_2d_s* restrict spline = malloc (sizeof(multi_UBspline_2d_s));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_2d_s.\n");
    abort();
  }
  spline->spcode = MULTI_U2D;
  spline->tcode  = SINGLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->num_splines = num_splines;

  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;

  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;

  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;

  int N = num_splines;
#ifdef HAVE_SSE
  // Pad to a multiple of 4 floats so each coefficient vector is
  // 16-byte aligned for SSE.
  if (N % 4)
    N += 4 - (N % 4);
#endif

  spline->x_stride = Ny*N;
  spline->y_stride = N;

#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc ((size_t)sizeof(float)*Nx*Ny*N);
#else
  // posix_memalign leaves *memptr unmodified on failure; make failure
  // observable to the NULL check below.
  if (posix_memalign ((void**)&spline->coefs, 64,
		      sizeof(float)*Nx*Ny*N) != 0)
    spline->coefs = NULL;
#endif
  // Record the coefficient count, consistent with the 1d/3d creators.
  spline->coefs_size = (size_t)Nx*(size_t)Ny*(size_t)N;
#ifdef HAVE_SSE
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_2d_s.\n");
    abort();
  }
  return spline;
}
// Compute the 2-D coefficients of spline `num` from `data` (Mx x My
// samples, row-major with y fastest) via two passes of 1-D solves:
// first along x for each data column, then along y in place.
// Coefficients for the different splines are interleaved, hence the
// y_stride factors on every offset and stride below.
void
set_multi_UBspline_2d_s (multi_UBspline_2d_s* spline, int num, float *data)
{
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Nx, Ny;
  // Coefficient-row counts per dimension (periodic-type: M+3, else M+2),
  // matching create_multi_UBspline_2d_s.
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;

  float *coefs = spline->coefs + num;
  int ys = spline->y_stride;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++) {
    intptr_t doffset = iy;        // data column iy, stride My across x
    intptr_t coffset = iy*ys;     // coefficient column iy, stride Ny*ys
    find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, (intptr_t)My,
		     coefs+coffset, (intptr_t)Ny*ys);
  }

  // Now, solve in the Y-direction (in place over the x-solved rows)
  for (int ix=0; ix<Nx; ix++) {
    intptr_t doffset = ix*Ny*ys;
    intptr_t coffset = ix*Ny*ys;
    find_coefs_1d_s (spline->y_grid, spline->yBC, coefs+doffset, (intptr_t)ys,
		     coefs+coffset, (intptr_t)ys);
  }
}
// Allocate and initialize a 3-D single-precision real multi-spline:
// per-dimension grids (periodic-type boundaries need M+3 coefficient
// rows, others M+2) and the interleaved coefficient array. Aborts on
// allocation failure.
//
// Fix: the posix_memalign return value was ignored; on failure it
// leaves *memptr unmodified, so spline->coefs stayed indeterminate and
// the NULL check below was unreliable. We now set coefs to NULL on
// failure.
multi_UBspline_3d_s*
create_multi_UBspline_3d_s (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
			    BCtype_s xBC, BCtype_s yBC, BCtype_s zBC,
			    int num_splines)
{
  // Create new spline
  multi_UBspline_3d_s* restrict spline = malloc (sizeof(multi_UBspline_3d_s));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_3d_s.\n");
    abort();
  }
  spline->spcode = MULTI_U3D;
  spline->tcode  = SINGLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->zBC = zBC;
  spline->num_splines = num_splines;

  // Setup internal variables
  int Mx = x_grid.num;  int My = y_grid.num; int Mz = z_grid.num;
  int Nx, Ny, Nz;

  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;

  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;

  if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;
  z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
  z_grid.delta_inv = 1.0/z_grid.delta;
  spline->z_grid   = z_grid;

  int N = num_splines;
#if defined(HAVE_SSE)
  // Pad to a multiple of 4 floats so each coefficient vector is
  // 16-byte aligned for SSE.
  if (N % 4)
    N += 4 - (N % 4);
  //  fprintf(stdout, "  The coefs has been 16-byte aligned.\n");
#endif

  spline->x_stride = Ny*Nz*N;
  spline->y_stride = Nz*N;
  spline->z_stride = N;

#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (sizeof(float)*Nx*Ny*Nz*N);
#else
  // posix_memalign leaves *memptr unmodified on failure; make failure
  // observable to the NULL check below.
  if (posix_memalign ((void**)&spline->coefs, 64,
		      ((size_t)sizeof(float)*Nx*Ny*Nz*N)) != 0)
    spline->coefs = NULL;
#endif
  spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz*(size_t)N;
#ifdef HAVE_SSE
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_3d_s.\n");
    abort();
  }
  return spline;
}
// Compute the 3-D coefficients of spline `num` from `data`
// (Mx x My x Mz samples, z fastest) via three passes of 1-D solves:
// along x for each (y,z) data column, then along y and z in place over
// the coefficient array. Different splines are interleaved, hence the
// z_stride factor on every offset and stride below. Each pass is
// parallelized with OpenMP over independent columns.
void
set_multi_UBspline_3d_s (multi_UBspline_3d_s* spline, int num, float *data)
{
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Mz = spline->z_grid.num;
  int Nx, Ny, Nz;
  // Coefficient-row counts per dimension (periodic-type: M+3, else M+2),
  // matching create_multi_UBspline_3d_s.
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;

  float *coefs = spline->coefs + num;

  intptr_t zs = spline->z_stride;
  // First, solve in the X-direction
#pragma omp parallel for
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = iy*Mz+iz;         // data column (iy,iz)
      intptr_t coffset = (iy*Nz+iz)*zs;    // matching coefficient column
      find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, (intptr_t)(My*Mz),
		       coefs+coffset, (intptr_t)(Ny*Nz)*zs);
    }

  // Now, solve in the Y-direction
#pragma omp parallel for
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = (ix*Ny*Nz + iz)*zs;
      intptr_t coffset = (ix*Ny*Nz + iz)*zs;
      find_coefs_1d_s (spline->y_grid, spline->yBC, coefs+doffset, (intptr_t)Nz*zs,
		       coefs+coffset, (intptr_t)Nz*zs);
    }

  // Now, solve in the Z-direction
#pragma omp parallel for
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = ((ix*Ny+iy)*Nz)*zs;
      intptr_t coffset = ((ix*Ny+iy)*Nz)*zs;
      find_coefs_1d_s (spline->z_grid, spline->zBC, coefs+doffset,
		       zs, coefs+coffset, zs);
    }
}
// Compute the 3-D coefficients of single-precision spline `num` from
// double-precision `data`: solve the three 1-D passes in a temporary
// double workspace (for accuracy), then copy the result into the
// interleaved float coefficient array.
//
// Fix: the malloc of the workspace was unchecked; a failure would have
// caused writes through a NULL pointer in the first solve pass. We now
// abort with a message, consistent with the other routines in this file.
void
set_multi_UBspline_3d_s_d(multi_UBspline_3d_s* spline, int num, double *data)
{
  // Mirror the single-precision boundary conditions into double BC structs
  // for find_coefs_1d_d.
  BCtype_d xBC, yBC, zBC;
  xBC.lCode=spline->xBC.lCode;   xBC.rCode=spline->xBC.rCode;
  yBC.lCode=spline->yBC.lCode;   yBC.rCode=spline->yBC.rCode;
  zBC.lCode=spline->zBC.lCode;   zBC.rCode=spline->zBC.rCode;
  xBC.lVal=spline->xBC.lVal;     xBC.rVal=spline->xBC.rVal;
  yBC.lVal=spline->yBC.lVal;     yBC.rVal=spline->yBC.rVal;
  zBC.lVal=spline->zBC.lVal;     zBC.rVal=spline->zBC.rVal;

  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Mz = spline->z_grid.num;
  int Nx, Ny, Nz;
  // Coefficient-row counts per dimension (periodic-type: M+3, else M+2),
  // matching create_multi_UBspline_3d_s.
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;

  double *spline_tmp = malloc(sizeof(double)*Nx*Ny*Nz);
  if (!spline_tmp) {
    fprintf (stderr, "Out of memory allocating workspace in set_multi_UBspline_3d_s_d.\n");
    abort();
  }

  // First, solve in the X-direction
#pragma omp parallel for
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = iy*Mz+iz;
      intptr_t coffset = iy*Nz+iz;
      find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My*Mz, spline_tmp+coffset, Ny*Nz);
    }

  // Now, solve in the Y-direction
#pragma omp parallel for
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = ix*Ny*Nz + iz;
      intptr_t coffset = ix*Ny*Nz + iz;
      find_coefs_1d_d (spline->y_grid, yBC, spline_tmp+doffset, Nz, spline_tmp+coffset, Nz);
    }

  // Now, solve in the Z-direction
#pragma omp parallel for
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = (ix*Ny+iy)*Nz;
      intptr_t coffset = (ix*Ny+iy)*Nz;
      find_coefs_1d_d (spline->z_grid, zBC, spline_tmp+doffset, 1, spline_tmp+coffset, 1);
    }

  // Downcast and scatter into the interleaved float coefficient array.
  {
    //const double* restrict i_ptr=spline_tmp;
#pragma omp parallel for
    for(int ix=0; ix<Nx; ++ix)
    {
      const double* restrict i_ptr=spline_tmp+ix*Ny*Nz;
      for(int iy=0; iy<Ny; ++iy)
        for(int iz=0; iz<Nz; ++iz)
          spline->coefs[ix*spline->x_stride +
                        iy*spline->y_stride +
                        iz*spline->z_stride + num] = (float)(*i_ptr++);
    }
  }
  free (spline_tmp);
}
/////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Single-Precision, Complex Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs with contain interpolating B-spline coefs
// Allocate and initialize a 1-D single-precision complex multi-spline:
// sets up the grid (periodic-type boundaries need M+3 coefficient rows,
// others M+2) and allocates the interleaved complex coefficient array
// (two floats per coefficient). Aborts on allocation failure.
multi_UBspline_1d_c*
create_multi_UBspline_1d_c (Ugrid x_grid, BCtype_c xBC, int num_splines)
{
  multi_UBspline_1d_c* restrict spline = malloc (sizeof(multi_UBspline_1d_c));
  if (spline == NULL) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_1d_c.\n");
    abort();
  }

  spline->spcode      = MULTI_U1D;
  spline->tcode       = SINGLE_COMPLEX;
  spline->xBC         = xBC;
  spline->num_splines = num_splines;

  // Grid setup: periodic-type boundaries carry one extra coefficient row
  // and divide the interval by num points rather than num-1.
  int M = x_grid.num;
  int N;
  int periodic = (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC);
  if (periodic) {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    N = M+3;
  } else {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    N = M+2;
  }
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
  spline->x_stride = num_splines;

#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (2*sizeof(float)*N*num_splines);
#else
  posix_memalign ((void**)&spline->coefs, 64, 2*sizeof(float)*N*num_splines);
#endif
  spline->coefs_size = (size_t)N*(size_t)num_splines;
#ifdef HAVE_SSE
  init_sse_data();
#endif
  if (spline->coefs == NULL) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_1d_c.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 1-D single-precision complex
// multi-spline from `data`.  The complex system is solved as two
// interleaved real systems: even floats hold the real part, odd
// floats the imaginary part (hence the stride of 2).
void
set_multi_UBspline_1d_c (multi_UBspline_1d_c* spline, int num, complex_float *data)
{
  complex_float *coefs = spline->coefs + num;
  // Split the complex boundary condition into two real-valued BCs.
  BCtype_s bc_re, bc_im;
  bc_re.lCode = spline->xBC.lCode;  bc_re.rCode = spline->xBC.rCode;
  bc_re.lVal  = spline->xBC.lVal_r; bc_re.rVal  = spline->xBC.rVal_r;
  bc_im.lCode = spline->xBC.lCode;  bc_im.rCode = spline->xBC.rCode;
  bc_im.lVal  = spline->xBC.lVal_i; bc_im.rVal  = spline->xBC.rVal_i;
  const intptr_t cstride = (intptr_t)2 * spline->x_stride;
  // Real part
  find_coefs_1d_s (spline->x_grid, bc_re,
                   (float*)data, (intptr_t)2, (float*)coefs, cstride);
  // Imaginary part
  find_coefs_1d_s (spline->x_grid, bc_im,
                   ((float*)data)+1, (intptr_t)2, ((float*)coefs)+1, cstride);
}
/*
 * Allocate a 2-D single-precision complex multi-spline holding
 * `num_splines` splines.  When SSE is enabled, the spline count is
 * padded to an even number so interleaved data stays aligned.
 * Aborts on allocation failure.
 */
multi_UBspline_2d_c*
create_multi_UBspline_2d_c (Ugrid x_grid, Ugrid y_grid,
                            BCtype_c xBC, BCtype_c yBC, int num_splines)
{
  // Create new spline
  multi_UBspline_2d_c* restrict spline = malloc (sizeof(multi_UBspline_2d_c));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_2d_c.\n");
    abort();
  }
  spline->spcode = MULTI_U2D;
  spline->tcode = SINGLE_COMPLEX;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  int N = num_splines;
#ifdef HAVE_SSE
  if (N % 2)
    N++;  // pad to even count for aligned SSE access
#endif
  spline->x_stride = Ny*N;
  spline->y_stride = N;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (2*sizeof(float)*Nx*Ny*N);
  spline->lapl2 = malloc (4*sizeof(float)*N);
#else
  // posix_memalign leaves the pointers unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64,
                      2*sizeof(float)*Nx*Ny*N))
    spline->coefs = NULL;
  if (posix_memalign ((void**)&spline->lapl2, 64,
                      4*sizeof(float)*N))
    spline->lapl2 = NULL;
#endif
#ifdef HAVE_SSE
  init_sse_data();
#endif
  if (!spline->coefs || !spline->lapl2) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_2d_c.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 2-D single-precision complex
// multi-spline from the Mx x My array `data`.  The complex system is
// solved as two interleaved real systems (even floats = real part,
// odd floats = imaginary part): first along X for every data row,
// then in place along Y for every coefficient row.
void
set_multi_UBspline_2d_c (multi_UBspline_2d_c* spline, int num, complex_float *data)
{
// Setup internal variables
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Nx, Ny;
complex_float* coefs = spline->coefs + num;
// Periodic-type boundaries carry 3 extra coefficients, others 2.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
// Split the complex boundary conditions into separate real-valued
// BCs for the real- and imaginary-part solves.
BCtype_s xBC_r, xBC_i, yBC_r, yBC_i;
xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r;
yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i;
int ys = spline->y_stride;
// First, solve in the X-direction (reads dense `data`, writes strided coefs)
for (int iy=0; iy<My; iy++) {
intptr_t doffset = (2*iy);
intptr_t coffset = (2*iy)*ys;
// Real part
find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, (intptr_t)2*My,
(float*)coefs+coffset, (intptr_t)2*Ny*ys);
// Imag part
find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, (intptr_t)2*My,
((float*)coefs)+coffset+1, (intptr_t)2*Ny*ys);
}
// Now, solve in the Y-direction (in place: source and destination coincide)
for (int ix=0; ix<Nx; ix++) {
intptr_t doffset = (2*ix*Ny)*ys;
intptr_t coffset = (2*ix*Ny)*ys;
// Real part
find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)coefs)+doffset,
(intptr_t)2*ys, ((float*)coefs)+coffset, (intptr_t)2*ys);
// Imag part
find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)coefs)+doffset+1,
(intptr_t)2*ys, ((float*)coefs)+coffset+1, (intptr_t)2*ys);
}
}
/*
 * Allocate a 3-D single-precision complex multi-spline holding
 * `num_splines` splines.  When SSE is enabled, the spline count is
 * padded to an even number for alignment.  Aborts on allocation
 * failure.
 */
multi_UBspline_3d_c*
create_multi_UBspline_3d_c (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
                            BCtype_c xBC, BCtype_c yBC, BCtype_c zBC,
                            int num_splines)
{
  // Create new spline
  multi_UBspline_3d_c* restrict spline = malloc (sizeof(multi_UBspline_3d_c));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_3d_c.\n");
    abort();
  }
  spline->spcode = MULTI_U3D;
  spline->tcode = SINGLE_COMPLEX;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->zBC = zBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num;
  int Nx, Ny, Nz;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;
  z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
  z_grid.delta_inv = 1.0/z_grid.delta;
  spline->z_grid = z_grid;
  int N = spline->num_splines;
#ifdef HAVE_SSE
  if (N % 2)
    N++;  // pad to even count for aligned SSE access
#endif
  spline->x_stride = Ny*Nz*N;
  spline->y_stride = Nz*N;
  spline->z_stride = N;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc ((size_t)2*sizeof(float)*Nx*Ny*Nz*N);
  spline->lapl3 = malloc (6*sizeof(float)*N);
#else
  // posix_memalign leaves the pointers unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64,
                      (size_t)2*sizeof(float)*Nx*Ny*Nz*N))
    spline->coefs = NULL;
  if (posix_memalign ((void**)&spline->lapl3, 64, 6*sizeof(float)*N))
    spline->lapl3 = NULL;
#endif
  spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz*(size_t)N;
#ifdef HAVE_SSE
  init_sse_data();
#endif
  if (!spline->coefs || !spline->lapl3) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_3d_c.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 3-D single-precision complex
// multi-spline from the Mx x My x Mz array `data`.  The complex
// system is solved as two interleaved real systems (even floats =
// real part, odd = imaginary): first along X from `data` into the
// coefficient array, then in place along Y, then in place along Z.
void
set_multi_UBspline_3d_c (multi_UBspline_3d_c* spline, int num, complex_float *data)
{
// Setup internal variables
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Mz = spline->z_grid.num;
int Nx, Ny, Nz;
// Periodic-type boundaries carry 3 extra coefficients, others 2.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
Nz = Mz+3;
else
Nz = Mz+2;
// Split the complex boundary conditions into separate real-valued
// BCs for the real- and imaginary-part solves.
BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r;
yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i;
zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode;
zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r;
zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode;
zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i;
complex_float *coefs = spline->coefs + num;
int zs = spline->z_stride;
// First, solve in the X-direction (reads dense `data`, writes strided coefs)
for (int iy=0; iy<My; iy++)
for (int iz=0; iz<Mz; iz++) {
intptr_t doffset = 2*(iy*Mz+iz);
intptr_t coffset = 2*(iy*Nz+iz)*zs;
// Real part
find_coefs_1d_s (spline->x_grid, xBC_r,
((float*)data)+doffset, (intptr_t)2*My*Mz,
((float*)coefs)+coffset, (intptr_t)2*Ny*Nz*zs);
// Imag part
find_coefs_1d_s (spline->x_grid, xBC_i,
((float*)data)+doffset+1, (intptr_t)2*My*Mz,
((float*)coefs)+coffset+1, (intptr_t)2*Ny*Nz*zs);
}
// Now, solve in the Y-direction (in place: source and destination coincide)
for (int ix=0; ix<Nx; ix++)
for (int iz=0; iz<Nz; iz++) {
intptr_t doffset = 2*(ix*Ny*Nz + iz)*zs;
intptr_t coffset = 2*(ix*Ny*Nz + iz)*zs;
// Real part
find_coefs_1d_s (spline->y_grid, yBC_r,
((float*)coefs)+doffset, (intptr_t)2*Nz*zs,
((float*)coefs)+coffset, (intptr_t)2*Nz*zs);
// Imag part
find_coefs_1d_s (spline->y_grid, yBC_i,
((float*)coefs)+doffset+1, (intptr_t)2*Nz*zs,
((float*)coefs)+coffset+1, (intptr_t)2*Nz*zs);
}
// Now, solve in the Z-direction (in place, unit logical stride * zs)
for (int ix=0; ix<Nx; ix++)
for (int iy=0; iy<Ny; iy++) {
intptr_t doffset = 2*((ix*Ny+iy)*Nz)*zs;
intptr_t coffset = 2*((ix*Ny+iy)*Nz)*zs;
// Real part
find_coefs_1d_s (spline->z_grid, zBC_r,
((float*)coefs)+doffset, (intptr_t)2*zs,
((float*)coefs)+coffset, (intptr_t)2*zs);
// Imag part
find_coefs_1d_s (spline->z_grid, zBC_i,
((float*)coefs)+doffset+1, (intptr_t)2*zs,
((float*)coefs)+coffset+1, (intptr_t)2*zs);
}
}
/*
 * Fill spline number `num` of a 3-D single-precision complex
 * multi-spline from DOUBLE-precision complex data.  The solves are
 * done entirely in double precision in a temporary Nx*Ny*Nz workspace
 * and only the final coefficients are rounded to single precision.
 * Fix: the workspace malloc result was previously used unchecked; on
 * OOM this dereferenced NULL.  Now aborts with a message, consistent
 * with the create_* routines.
 */
void
set_multi_UBspline_3d_c_z (multi_UBspline_3d_c* spline, int num, complex_double *data)
{
  // Setup internal variables
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Mz = spline->z_grid.num;
  int Nx, Ny, Nz;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;
  // Split the complex BCs into real-valued double-precision BCs.
  BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
  xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal = (double)spline->xBC.lVal_r; xBC_r.rVal = (double)spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal = (double)spline->xBC.lVal_i; xBC_i.rVal = (double)spline->xBC.rVal_i;
  yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
  yBC_r.lVal = (double)spline->yBC.lVal_r; yBC_r.rVal = (double)spline->yBC.rVal_r;
  yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
  yBC_i.lVal = (double)spline->yBC.lVal_i; yBC_i.rVal = (double)spline->yBC.rVal_i;
  zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode;
  zBC_r.lVal = (double)spline->zBC.lVal_r; zBC_r.rVal = (double)spline->zBC.rVal_r;
  zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode;
  zBC_i.lVal = (double)spline->zBC.lVal_i; zBC_i.rVal = (double)spline->zBC.rVal_i;
  // Double-precision workspace for all three solve passes.
  complex_double *spline_tmp = malloc(2*sizeof(double)*Nx*Ny*Nz);
  if (!spline_tmp) {
    fprintf (stderr, "Out of memory allocating workspace in set_multi_UBspline_3d_c_z.\n");
    abort();
  }
  // First, solve in the X-direction (reads dense `data`)
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = 2*(iy*Mz+iz);
      intptr_t coffset = 2*(iy*Nz+iz);
      // Real part
      find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz,
                       ((double*)spline_tmp)+coffset, 2*Ny*Nz);
      // Imag part
      find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz,
                       ((double*)spline_tmp)+coffset+1, 2*Ny*Nz);
    }
  // Now, solve in the Y-direction (in place in the workspace)
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = 2*(ix*Ny*Nz + iz);
      intptr_t coffset = 2*(ix*Ny*Nz + iz);
      // Real part
      find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline_tmp)+doffset, 2*Nz,
                       ((double*)spline_tmp)+coffset, 2*Nz);
      // Imag part
      find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline_tmp)+doffset+1, 2*Nz,
                       ((double*)spline_tmp)+coffset+1, 2*Nz);
    }
  // Now, solve in the Z-direction (in place in the workspace)
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = 2*((ix*Ny+iy)*Nz);
      intptr_t coffset = 2*((ix*Ny+iy)*Nz);
      // Real part
      find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline_tmp)+doffset, 2,
                       ((double*)spline_tmp)+coffset, 2);
      // Imag part
      find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline_tmp)+doffset+1, 2,
                       ((double*)spline_tmp)+coffset+1, 2);
    }
  // Round the double-precision result down into the single-precision
  // coefficient array, interleaved at position `num`.
  {
    const complex_double* restrict i_ptr=spline_tmp;
    for(int ix=0; ix<Nx; ++ix)
      for(int iy=0; iy<Ny; ++iy)
        for(int iz=0; iz<Nz; ++iz)
          spline->coefs[ix*spline->x_stride +
                        iy*spline->y_stride +
                        iz*spline->z_stride + num] = (complex_float)(*i_ptr++);
  }
  free(spline_tmp);
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Double-Precision, Real Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain interpolating B-spline coefs
// Solver for non-periodic (derivative/flat) boundary conditions.
void
solve_deriv_interp_1d_d (double bands[], double coefs[],
int M, int cstride);
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain interpolating B-spline coefs
// NOTE(review): `cstride` is `int` here but `intptr_t` in
// solve_periodic_interp_1d_d below — confirm against the definitions
// before unifying.
void
solve_periodic_interp_1d_d (double bands[], double coefs[],
int M, intptr_t cstride);
// 1-D double-precision coefficient solver used by the set_multi_* 
// routines in this file; reads input with stride `dstride`, writes
// coefficients with stride `cstride`.
void
find_coefs_1d_d (Ugrid grid, BCtype_d bc,
double *data, intptr_t dstride,
double *coefs, intptr_t cstride);
/*
 * Allocate a 1-D double-precision real multi-spline holding
 * `num_splines` interleaved splines.  When SSE2 is enabled the spline
 * count is padded to an even number for alignment.  Aborts on
 * allocation failure.
 */
multi_UBspline_1d_d*
create_multi_UBspline_1d_d (Ugrid x_grid, BCtype_d xBC, int num_splines)
{
  // Create new spline
  multi_UBspline_1d_d* restrict spline = malloc (sizeof(multi_UBspline_1d_d));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_1d_d.\n");
    abort();
  }
  spline->spcode = MULTI_U1D;
  spline->tcode = DOUBLE_REAL;
  spline->xBC = xBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num;
  int Nx;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
    // Periodic grids span `num` intervals and need 3 extra coefficients.
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    Nx = Mx+3;
  }
  else {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    Nx = Mx+2;
  }
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  int N = num_splines;
#ifdef HAVE_SSE2
  // We must pad to keep data aligned for SSE operations
  if (N & 1)
    N++;
#endif
  spline->x_stride = N;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (sizeof(double)*Nx*N);
#else
  // posix_memalign leaves the pointer unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64, sizeof(double)*Nx*N))
    spline->coefs = NULL;
#endif
  spline->coefs_size=(size_t)Nx*(size_t)N;
#ifdef HAVE_SSE2
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_1d_d.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 1-D double-precision real multi-spline
// from the dense array `data`, using the spline's stored boundary
// condition.
void
set_multi_UBspline_1d_d (multi_UBspline_1d_d* spline, int num, double *data)
{
  const int stride = spline->x_stride;
  find_coefs_1d_d (spline->x_grid, spline->xBC,
                   data, 1, spline->coefs + num, stride);
}
// Same as set_multi_UBspline_1d_d, but solves with the caller-supplied
// boundary condition `xBC` instead of the one stored in the spline.
void
set_multi_UBspline_1d_d_BC (multi_UBspline_1d_d* spline, int num, double *data,
                            BCtype_d xBC)
{
  const int stride = spline->x_stride;
  find_coefs_1d_d (spline->x_grid, xBC,
                   data, 1, spline->coefs + num, stride);
}
/*
 * Allocate a 2-D double-precision real multi-spline holding
 * `num_splines` splines.  When SSE2 is enabled the spline count is
 * padded to an even number for alignment.  Aborts on allocation
 * failure.
 */
multi_UBspline_2d_d*
create_multi_UBspline_2d_d (Ugrid x_grid, Ugrid y_grid,
                            BCtype_d xBC, BCtype_d yBC, int num_splines)
{
  // Create new spline
  multi_UBspline_2d_d* restrict spline = malloc (sizeof(multi_UBspline_2d_d));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_2d_d.\n");
    abort();
  }
  spline->spcode = MULTI_U2D;
  spline->tcode = DOUBLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  int N = num_splines;
#ifdef HAVE_SSE2
  // We must pad to keep data aligned for SSE operations.
  // (Tests N, matching the sibling 1-D/3-D creates; N == num_splines here.)
  if (N & 1)
    N++;
#endif
  spline->x_stride = Ny*N;
  spline->y_stride = N;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (sizeof(double)*Nx*Ny*N);
#else
  // posix_memalign leaves the pointer unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64, (sizeof(double)*Nx*Ny*N)))
    spline->coefs = NULL;
#endif
#ifdef HAVE_SSE2
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_2d_d.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 2-D double-precision real multi-spline
// from the Mx x My array `data`: solve along X for every data row,
// then in place along Y for every coefficient row.
void
set_multi_UBspline_2d_d (multi_UBspline_2d_d* spline, int num, double *data)
{
  const int Mx = spline->x_grid.num;
  const int My = spline->y_grid.num;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  const int Nx = (spline->xBC.lCode == PERIODIC ||
                  spline->xBC.lCode == ANTIPERIODIC) ? Mx+3 : Mx+2;
  const int Ny = (spline->yBC.lCode == PERIODIC ||
                  spline->yBC.lCode == ANTIPERIODIC) ? My+3 : My+2;
  double *coefs = spline->coefs + num;
  const int ys = spline->y_stride;
  // X-direction pass: reads dense data, writes strided coefficients.
  for (int row = 0; row < My; row++)
    find_coefs_1d_d (spline->x_grid, spline->xBC,
                     data + row, (intptr_t)My,
                     coefs + (intptr_t)row*ys, (intptr_t)Ny*ys);
  // Y-direction pass: in place, source and destination coincide.
  for (int col = 0; col < Nx; col++) {
    double *line = coefs + (intptr_t)col*Ny*ys;
    find_coefs_1d_d (spline->y_grid, spline->yBC,
                     line, (intptr_t)ys,
                     line, (intptr_t)ys);
  }
}
/*
 * Allocate a 3-D double-precision real multi-spline holding
 * `num_splines` splines.  Both the spline struct and the coefficient
 * array are 64-byte aligned when posix_memalign is available.  When
 * SSE2 is enabled the spline count is padded to an even number for
 * alignment.  Aborts on allocation failure.
 */
multi_UBspline_3d_d*
create_multi_UBspline_3d_d (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
                            BCtype_d xBC, BCtype_d yBC, BCtype_d zBC,
                            int num_splines)
{
  // Create new spline
  multi_UBspline_3d_d* restrict spline;
#ifdef HAVE_POSIX_MEMALIGN
  // posix_memalign leaves the pointer unmodified on failure; map any
  // failure to NULL so the check below is well-defined.
  if (posix_memalign ((void**)&spline, 64, (size_t)sizeof(multi_UBspline_3d_d)))
    spline = NULL;
#else
  spline = malloc (sizeof(multi_UBspline_3d_d));
#endif
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_3d_d.\n");
    abort();
  }
  spline->spcode = MULTI_U3D;
  spline->tcode = DOUBLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->zBC = zBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num;
  int Nx, Ny, Nz;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;
  z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
  z_grid.delta_inv = 1.0/z_grid.delta;
  spline->z_grid = z_grid;
  int N = num_splines;
#if defined HAVE_SSE2
  // We must pad to keep data aligned for SSE operations
  if (N & 1)
    N++;
#endif
  spline->x_stride = Ny*Nz*N;
  spline->y_stride = Nz*N;
  spline->z_stride = N;
#ifdef HAVE_POSIX_MEMALIGN
  if (posix_memalign ((void**)&spline->coefs, 64,
                      ((size_t)sizeof(double)*Nx*Ny*Nz*N)))
    spline->coefs = NULL;
#else
  spline->coefs = malloc ((size_t)sizeof(double)*Nx*Ny*Nz*N);
#endif
  spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz*(size_t)N;
#ifdef HAVE_SSE2
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_3d_d.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 3-D double-precision real multi-spline
// from the Mx x My x Mz array `data`: solve along X from `data` into
// the coefficient array, then in place along Y, then in place along Z.
// Each `#pragma omp parallel for` parallelizes the outer loop of its
// doubly nested pair; the 1-D solves within a pass are independent.
void
set_multi_UBspline_3d_d (multi_UBspline_3d_d* spline, int num, double *data)
{
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Mz = spline->z_grid.num;
int Nx, Ny, Nz;
// Periodic-type boundaries carry 3 extra coefficients, others 2.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
Nz = Mz+3;
else
Nz = Mz+2;
double *coefs = spline->coefs + num;
intptr_t zs = spline->z_stride;
// First, solve in the X-direction (reads dense `data`)
#pragma omp parallel for
for (int iy=0; iy<My; iy++)
for (int iz=0; iz<Mz; iz++) {
intptr_t doffset = iy*Mz+iz;
intptr_t coffset = (iy*Nz+iz)*zs;
find_coefs_1d_d (spline->x_grid, spline->xBC,
data+doffset, (intptr_t)My*Mz,
coefs+coffset, (intptr_t)Ny*Nz*zs);
}
// Now, solve in the Y-direction (in place: source and destination coincide)
#pragma omp parallel for
for (int ix=0; ix<Nx; ix++)
for (int iz=0; iz<Nz; iz++) {
intptr_t doffset = (ix*Ny*Nz + iz)*zs;
intptr_t coffset = (ix*Ny*Nz + iz)*zs;
find_coefs_1d_d (spline->y_grid, spline->yBC,
coefs+doffset, (intptr_t)Nz*zs,
coefs+coffset, (intptr_t)Nz*zs);
}
// Now, solve in the Z-direction (in place)
#pragma omp parallel for
for (int ix=0; ix<Nx; ix++)
for (int iy=0; iy<Ny; iy++) {
intptr_t doffset = (ix*Ny+iy)*Nz*zs;
intptr_t coffset = (ix*Ny+iy)*Nz*zs;
find_coefs_1d_d (spline->z_grid, spline->zBC,
coefs+doffset, (intptr_t)zs,
coefs+coffset, (intptr_t)zs);
}
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Double-Precision, Complex Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain interpolating B-spline coefs
/*
 * Allocate a 1-D double-precision complex multi-spline holding
 * `num_splines` interleaved splines.  Aborts on allocation failure.
 */
multi_UBspline_1d_z*
create_multi_UBspline_1d_z (Ugrid x_grid, BCtype_z xBC, int num_splines)
{
  // Create new spline
  multi_UBspline_1d_z* restrict spline = malloc (sizeof(multi_UBspline_1d_z));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_1d_z.\n");
    abort();
  }
  spline->spcode = MULTI_U1D;
  spline->tcode = DOUBLE_COMPLEX;
  spline->xBC = xBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num;
  int Nx;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
    // Periodic grids span `num` intervals and need 3 extra coefficients.
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    Nx = Mx+3;
  }
  else {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    Nx = Mx+2;
  }
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  spline->x_stride = num_splines;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (2*sizeof(double)*Nx*num_splines);
#else
  // posix_memalign leaves the pointer unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64,
                      2*sizeof(double)*Nx*num_splines))
    spline->coefs = NULL;
#endif
  spline->coefs_size=(size_t)Nx*(size_t)num_splines;
#ifdef HAVE_SSE2
  init_sse_data();
#endif
  if (!spline->coefs) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_1d_z.\n");
    abort();
  }
  return spline;
}
/*
 * Fill spline number `num` of a 1-D double-precision complex
 * multi-spline from `data`.  The complex system is solved as two
 * interleaved real systems (stride 2 over the double view).
 * Cleanup: removed the dead locals Mx/Nx, which were computed but
 * never used.
 */
void
set_multi_UBspline_1d_z (multi_UBspline_1d_z* spline, int num, complex_double *data)
{
  complex_double *coefs = spline->coefs + num;
  // Split the complex boundary condition into two real-valued BCs.
  BCtype_d xBC_r, xBC_i;
  xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
  int xs = spline->x_stride;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r,
                   (double*)data, (intptr_t)2,
                   ((double*)coefs), (intptr_t)2*xs);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i,
                   ((double*)data)+1, (intptr_t)2,
                   ((double*)coefs)+1, (intptr_t)2*xs);
}
/*
 * Same as set_multi_UBspline_1d_z, but solves with the caller-supplied
 * boundary condition `xBC` instead of the one stored in the spline.
 * Cleanup: removed the dead locals Mx/Nx, which were computed but
 * never used.
 */
void
set_multi_UBspline_1d_z_BC (multi_UBspline_1d_z *spline, int num,
                            complex_double *data, BCtype_z xBC)
{
  complex_double *coefs = spline->coefs + num;
  // Split the complex boundary condition into two real-valued BCs.
  BCtype_d xBC_r, xBC_i;
  xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode;
  xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r;
  xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode;
  xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i;
  int xs = spline->x_stride;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r,
                   (double*)data, (intptr_t)2,
                   ((double*)coefs), (intptr_t)2*xs);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i,
                   ((double*)data)+1, (intptr_t)2,
                   ((double*)coefs)+1, (intptr_t)2*xs);
}
/*
 * Allocate a 2-D double-precision complex multi-spline holding
 * `num_splines` splines.  Aborts on allocation failure.
 */
multi_UBspline_2d_z*
create_multi_UBspline_2d_z (Ugrid x_grid, Ugrid y_grid,
                            BCtype_z xBC, BCtype_z yBC, int num_splines)
{
  // Create new spline
  multi_UBspline_2d_z* restrict spline = malloc (sizeof(multi_UBspline_2d_z));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_2d_z.\n");
    abort();
  }
  spline->spcode = MULTI_U2D;
  spline->tcode = DOUBLE_COMPLEX;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  spline->x_stride = Ny*num_splines;
  spline->y_stride = num_splines;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc (2*sizeof(double)*Nx*Ny*num_splines);
  spline->lapl2 = malloc (4*sizeof(double)*num_splines);
#else
  // posix_memalign leaves the pointers unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64,
                      2*sizeof(double)*Nx*Ny*num_splines))
    spline->coefs = NULL;
  if (posix_memalign ((void**)&spline->lapl2, 64,
                      4*sizeof(double)*num_splines))
    spline->lapl2 = NULL;
#endif
#ifdef HAVE_SSE2
  init_sse_data();
#endif
  if (!spline->coefs || !spline->lapl2) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_2d_z.\n");
    abort();
  }
  return spline;
}
// Fill spline number `num` of a 2-D double-precision complex
// multi-spline from the Mx x My array `data`.  The complex system is
// solved as two interleaved real systems (even doubles = real part,
// odd = imaginary): first along X for every data row, then in place
// along Y for every coefficient row.
void
set_multi_UBspline_2d_z (multi_UBspline_2d_z* spline, int num,
complex_double *data)
{
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Nx, Ny;
// Periodic-type boundaries carry 3 extra coefficients, others 2.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
// Split the complex boundary conditions into separate real-valued
// BCs for the real- and imaginary-part solves.
BCtype_d xBC_r, xBC_i, yBC_r, yBC_i;
xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r;
yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i;
complex_double *coefs = spline->coefs + num;
int ys = spline->y_stride;
// First, solve in the X-direction (reads dense `data`)
for (int iy=0; iy<My; iy++) {
intptr_t doffset = 2*iy;
intptr_t coffset = 2*iy*ys;
// Real part
find_coefs_1d_d (spline->x_grid, xBC_r,
((double*)data+doffset), (intptr_t)2*My,
(double*)coefs+coffset, (intptr_t)2*Ny*ys);
// Imag part
find_coefs_1d_d (spline->x_grid, xBC_i,
((double*)data)+doffset+1, (intptr_t)2*My,
((double*)coefs)+coffset+1, (intptr_t)2*Ny*ys);
}
// Now, solve in the Y-direction (in place: source and destination coincide)
for (int ix=0; ix<Nx; ix++) {
intptr_t doffset = 2*ix*Ny*ys;
intptr_t coffset = 2*ix*Ny*ys;
// Real part
find_coefs_1d_d (spline->y_grid, yBC_r,
((double*)coefs)+doffset, (intptr_t)2*ys,
(double*)coefs+coffset, (intptr_t)2*ys);
// Imag part
find_coefs_1d_d (spline->y_grid, yBC_i,
(double*)coefs+doffset+1, (intptr_t)2*ys,
((double*)coefs)+coffset+1, (intptr_t)2*ys);
}
}
/*
 * Allocate a 3-D double-precision complex multi-spline holding
 * `num_splines` splines.  When SSE2 is enabled, the spline count is
 * padded up to a multiple of 4 for alignment.  Aborts on allocation
 * failure.
 */
multi_UBspline_3d_z*
create_multi_UBspline_3d_z (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
                            BCtype_z xBC, BCtype_z yBC, BCtype_z zBC,
                            int num_splines)
{
  // Create new spline
  multi_UBspline_3d_z* restrict spline = malloc (sizeof(multi_UBspline_3d_z));
  if (!spline) {
    fprintf (stderr, "Out of memory allocating spline in create_multi_UBspline_3d_z.\n");
    abort();
  }
  spline->spcode = MULTI_U3D;
  spline->tcode = DOUBLE_COMPLEX;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->zBC = zBC;
  spline->num_splines = num_splines;
  // Setup internal variables
  int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num;
  int Nx, Ny, Nz;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;
  z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
  z_grid.delta_inv = 1.0/z_grid.delta;
  spline->z_grid = z_grid;
  int N = num_splines;
#ifdef HAVE_SSE2
  // Pad the spline count up to a multiple of 4 for aligned access.
  if (N & 3)
    N += 4-(N & 3);
#endif
  spline->x_stride = (intptr_t)Ny*(intptr_t)Nz*N;
  spline->y_stride = Nz*N;
  spline->z_stride = N;
#ifndef HAVE_POSIX_MEMALIGN
  spline->coefs = malloc ((size_t)2*sizeof(double)*Nx*Ny*Nz*N);
  spline->lapl3 = malloc (6*sizeof(double)*N);
#else
  // posix_memalign leaves the pointers unmodified on failure; map any
  // failure to NULL so the out-of-memory check below is well-defined.
  if (posix_memalign ((void**)&spline->coefs, 64,
                      (size_t)2*sizeof(double)*Nx*Ny*Nz*N))
    spline->coefs = NULL;
  if (posix_memalign ((void**)&spline->lapl3, 64, 6*sizeof(double)*N))
    spline->lapl3 = NULL;
#endif
  spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz*(size_t)N;
#ifdef HAVE_SSE2
  init_sse_data();
#endif
  if (!spline->coefs || !spline->lapl3) {
    fprintf (stderr, "Out of memory allocating spline coefficients in create_multi_UBspline_3d_z.\n");
    abort();
  }
  return spline;
}
/*
 * Fill spline number `num` of a 3-D double-precision complex
 * multi-spline from the Mx x My x Mz array `data`.  The complex
 * system is solved as two interleaved real systems (even doubles =
 * real part, odd = imaginary): first along X from `data` into the
 * coefficient array, then in place along Y, then in place along Z.
 * One OpenMP parallel region covers all three passes; the inner
 * `omp for` loops carry implicit barriers, so each pass completes
 * before the next begins.
 * Cleanup: removed the unused local `int N = spline->num_splines;`.
 */
void
set_multi_UBspline_3d_z (multi_UBspline_3d_z* spline, int num, complex_double *data)
{
  // Setup internal variables
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Mz = spline->z_grid.num;
  int Nx, Ny, Nz;
  // Periodic-type boundaries carry 3 extra coefficients, others 2.
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
    Nz = Mz+3;
  else
    Nz = Mz+2;
  // Split the complex BCs into real-valued BCs for the two solves.
  BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
  xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
  yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
  yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r;
  yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
  yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i;
  zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode;
  zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r;
  zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode;
  zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i;
  complex_double *coefs = spline->coefs + num;
  int zs = spline->z_stride;
  // First, solve in the X-direction (reads dense `data`)
#pragma omp parallel
  {
    for (int iy=0; iy<My; iy++)
    {
#pragma omp for
      for (int iz=0; iz<Mz; iz++) {
        intptr_t doffset = 2*(iy*Mz+iz);
        intptr_t coffset = 2*(iy*Nz+iz)*zs;
        // Real part
        find_coefs_1d_d (spline->x_grid, xBC_r,
                         ((double*)data)+doffset, (intptr_t)2*My*Mz,
                         ((double*)coefs)+coffset, (intptr_t)2*Ny*Nz*zs);
        // Imag part
        find_coefs_1d_d (spline->x_grid, xBC_i,
                         ((double*)data)+doffset+1, (intptr_t)2*My*Mz,
                         ((double*)coefs)+coffset+1, (intptr_t)2*Ny*Nz*zs);
      }
    }
    // Now, solve in the Y-direction (in place)
    for (int ix=0; ix<Nx; ix++)
    {
#pragma omp for
      for (int iz=0; iz<Nz; iz++) {
        intptr_t doffset = 2*(ix*Ny*Nz + iz)*zs;
        intptr_t coffset = 2*(ix*Ny*Nz + iz)*zs;
        // Real part
        find_coefs_1d_d (spline->y_grid, yBC_r,
                         ((double*)coefs)+doffset, (intptr_t)2*Nz*zs,
                         ((double*)coefs)+coffset, (intptr_t)2*Nz*zs);
        // Imag part
        find_coefs_1d_d (spline->y_grid, yBC_i,
                         ((double*)coefs)+doffset+1, (intptr_t)2*Nz*zs,
                         ((double*)coefs)+coffset+1, (intptr_t)2*Nz*zs);
      }
    }
    // Now, solve in the Z-direction (in place)
    for (int ix=0; ix<Nx; ix++)
    {
#pragma omp for
      for (int iy=0; iy<Ny; iy++) {
        intptr_t doffset = 2*((ix*Ny+iy)*Nz)*zs;
        intptr_t coffset = 2*((ix*Ny+iy)*Nz)*zs;
        // Real part
        find_coefs_1d_d (spline->z_grid, zBC_r,
                         ((double*)coefs)+doffset, (intptr_t)2*zs,
                         ((double*)coefs)+coffset, (intptr_t)2*zs);
        // Imag part
        find_coefs_1d_d (spline->z_grid, zBC_i,
                         ((double*)coefs)+doffset+1, (intptr_t)2*zs,
                         ((double*)coefs)+coffset+1, (intptr_t)2*zs);
      }
    }
  }
}
/* Free a multi-UBspline object: releases the coefficient array and the
 * spline struct itself.
 *
 * Robustness fix: accept NULL (no-op), matching free()'s own contract,
 * so callers may destroy unconditionally without a guard.  The original
 * dereferenced spline->coefs and crashed on a NULL argument.
 */
void
destroy_multi_UBspline (Bspline *spline)
{
  if (spline == NULL)
    return;
  free (spline->coefs);
  free (spline);
}
|
GB_unop__identity_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_uint32)
// op(A') function: GB (_unop_tran__identity_bool_uint32)
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = identity (cast (Ax)): cast every uint32_t entry of A to bool.
// Auto-generated kernel (see file header); code left untouched, comments only.
GrB_Info GB (_unop_apply__identity_bool_uint32)
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is declared before the pragma so the OpenMP loop index has a
    // signed 64-bit type on all compilers; as the loop variable it is
    // implicitly private.
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            uint32_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose, typecast uint32_t -> bool.
// The whole body is the shared generated template GB_unop_transpose.c,
// specialized via the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_bool_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
zero_omp.c | /*
* File: zero_omp.c
* CVS: $Id: zero_omp.c,v 1.34 2009/09/10 20:19:49 terpstra Exp $
* Author: Philip Mucci
* mucci@cs.utk.edu
* Mods: Nils Smeds
* smeds@pdc.kth.se
* Anders Nilsson
* anni@pdc.kth.se
*/
/* This file performs the following test: start, stop and timer
functionality for 2 slave OMP threads
- It attempts to use the following two counters. It may use less
depending on hardware counter resource limitations. These are counted
in the default counting domain and default granularity, depending on
the platform. Usually this is the user domain (PAPI_DOM_USER) and
thread context (PAPI_GRN_THR).
+ PAPI_FP_INS
+ PAPI_TOT_CYC
Each thread inside the Thread routine:
- Get cyc.
- Get us.
- Start counters
- Do flops
- Stop and read counters
- Get us.
- Get cyc.
Master serial thread:
- Get us.
- Get cyc.
- Run parallel for loop
- Get us.
- Get cyc.
*/
#include "papi_test.h"
#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif
extern int TESTS_QUIET; /* Declared in test_utils.c */
const PAPI_hw_info_t *hw_info = NULL;
/* Per-thread body of the PAPI OpenMP test: creates an event set with
   PAPI_TOT_CYC plus one FP event, counts n iterations of do_flops(),
   and reports counts and wall time.  Must NOT call test_pass(): threads
   may not exit the program from inside an OpenMP parallel region. */
void Thread(int n)
{
   int retval, num_tests = 1;
   int EventSet1=PAPI_NULL;
   int PAPI_event, mask1;
   int num_events1;
   long long **values;
   long long elapsed_us, elapsed_cyc;
   char event_name[PAPI_MAX_STR_LEN];
   printf("Thread 0x%x started\n", omp_get_thread_num());
   num_events1 = 2;
   /* add PAPI_TOT_CYC and one of the events in PAPI_FP_INS, PAPI_FP_OPS or
      PAPI_TOT_INS, depending on the availability of the event on the
      platform */
   EventSet1 = add_two_events(&num_events1, &PAPI_event, hw_info, &mask1);
   retval = PAPI_event_code_to_name(PAPI_event, event_name);
   if (retval != PAPI_OK)
      test_fail(__FILE__, __LINE__, "PAPI_event_code_to_name", retval);
   values = allocate_test_space(num_tests, num_events1);
   /* timestamps taken immediately around the counted region */
   elapsed_us = PAPI_get_real_usec();
   elapsed_cyc = PAPI_get_real_cyc();
   retval = PAPI_start(EventSet1);
   if (retval != PAPI_OK)
      test_fail(__FILE__, __LINE__, "PAPI_start", retval);
   do_flops(n);
   retval = PAPI_stop(EventSet1, values[0]);
   if (retval != PAPI_OK)
      test_fail(__FILE__, __LINE__, "PAPI_stop", retval);
   elapsed_us = PAPI_get_real_usec() - elapsed_us;
   elapsed_cyc = PAPI_get_real_cyc() - elapsed_cyc;
   remove_test_events(&EventSet1, mask1);
   if (!TESTS_QUIET) {
      printf("Thread 0x%x %-12s : \t%lld\n", omp_get_thread_num(), event_name,
             (values[0])[0]);
      printf("Thread 0x%x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num(), (values[0])[1]);
      printf("Thread 0x%x Real usec    : \t%lld\n", omp_get_thread_num(), elapsed_us);
      printf("Thread 0x%x Real cycles  : \t%lld\n", omp_get_thread_num(), elapsed_cyc);
   }
   /* It is illegal for the threads to exit in OpenMP */
   /* test_pass(__FILE__,0,0); */
   free_test_space(values, num_tests);
   /* detach this thread from PAPI before the parallel region ends */
   PAPI_unregister_thread();
   printf("Thread 0x%x finished\n", omp_get_thread_num());
}
/* Driver: initializes PAPI with omp_get_thread_num as the thread-id
   function, then runs Thread() in a parallel region, serially with one
   thread, and in a second full-width parallel region, timing the whole
   sequence. */
int main(int argc, char **argv)
{
   int maxthr, retval;
   long long elapsed_us, elapsed_cyc;
   tests_quiet(argc, argv);     /* Set TESTS_QUIET variable */
   retval = PAPI_library_init(PAPI_VER_CURRENT);
   if (retval != PAPI_VER_CURRENT)
      test_fail(__FILE__, __LINE__, "PAPI_library_init", retval);
   hw_info = PAPI_get_hardware_info();
   if (hw_info == NULL)
      test_fail(__FILE__, __LINE__, "PAPI_get_hardware_info", 2);
   elapsed_us = PAPI_get_real_usec();
   elapsed_cyc = PAPI_get_real_cyc();
   retval = PAPI_thread_init((unsigned long (*)(void)) (omp_get_thread_num));
   if (retval != PAPI_OK)
   {
      /* PAPI_ESBSTR: the substrate cannot support threads -> skip, not fail */
      if (retval == PAPI_ESBSTR)
         test_skip(__FILE__, __LINE__, "PAPI_thread_init", retval);
      else
         test_fail(__FILE__, __LINE__, "PAPI_thread_init", retval);
   }
   /* each thread gets a workload proportional to its thread id + 1 */
   /* NOTE(review): maxthr is written but never read afterwards — it only
      forces a private copy per thread; presumably vestigial. */
#pragma omp parallel private(maxthr)
   {
      maxthr = omp_get_num_threads();
      Thread(1000000 * (omp_get_thread_num()+1));
   }
   /* serial re-run with the thread count forced to 1 */
   omp_set_num_threads(1);
   Thread(1000000 * (omp_get_thread_num()+1));
   omp_set_num_threads(omp_get_max_threads());
#pragma omp parallel private(maxthr)
   {
      maxthr = omp_get_num_threads();
      Thread(1000000 * (omp_get_thread_num()+1));
   }
   elapsed_cyc = PAPI_get_real_cyc() - elapsed_cyc;
   elapsed_us = PAPI_get_real_usec() - elapsed_us;
   if (!TESTS_QUIET) {
      printf("Master real usec   : \t%lld\n", elapsed_us);
      printf("Master real cycles : \t%lld\n", elapsed_cyc);
   }
   test_pass(__FILE__, NULL, 0);
   exit(0);
}
|
model_2cxm_mex.c | /*==========================================================
* model_2cxm_mex.c - STARDCE toolbox
*
* Implements the DCE 2-compartment exchange model
*
* The calling syntax is:
*
* C = model_2cxm_mex(time, VIF, vp, ve, kt, fp );
*
* Compilation:
*
* mex -R2018a model_2cxm_mex.c
* or:
* 1) uncomment the compiler directive MATLAB2015
* 2) mex COPTIMFLAGS="\$COPTIMFLAGS -std=c99" model_2cxm_mex.c
*
* Yannick 2020
* Copied from GPUfit by Sam Barnes
*
*========================================================*/
#include "mex.h"
#include <math.h>
#ifdef __GNU__
#include <omp.h>
#endif
#ifndef MAXCORES
#define MAXCORES 1
#endif
// #define MATLAB2015
float dce_2cxm_value_float (
float vp, // vp
float ve, //ve
float kt, //Ktrans
float fp, // Fp
int const point_index, // time points to evaluate teh model at
float const * T, // time point vector
float const * Cp // VIF
)
{
// integral/convolution
float PS;
if(kt>=fp) {
PS = 10e8;
} else {
PS = fp / ((fp / kt) - 1);
}
float convFunc = 0;
float Tp = vp / (PS + fp);
float Te = ve / PS;
float Tb = vp / fp;
float Kpos = 0.5 * (1/Tp + 1/Te + sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
float Kneg = 0.5 * (1/Tp + 1/Te - sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
float Eneg = (Kpos - 1/Tb) / (Kpos - Kneg);
for (int i = 1; i <= point_index; i++) {
float spacing = T[i] - T[i - 1];
float Ct = Cp[i] * (exp(-(T[point_index] - T[i]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i]) * Kneg) - exp(-Kpos)));//(p2 * exp(-(T[point_index] - T[i])/Tp) + p0 * (1 - exp(-(T[point_index] - T[i])/Tp)));
float Ctprev = Cp[i - 1] * (exp(-(T[point_index] - T[i-1]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i-1]) * Kneg) - exp(-Kpos))); //(p2 * exp(-(T[point_index] - T[i-1])/Tp) + p0 * (1 - exp(-(T[point_index] - T[i-1])/Tp)));
convFunc += ((Ct + Ctprev) / 2 * spacing);
}
float function_value = fp * convFunc;
return function_value;
}
float dce_2cxm_value_double (
double vp, // vp
double ve, //ve
double kt, //Ktrans
double fp, // Fp
int const point_index, // time points to evaluate teh model at
double const * T, // time point vector
double const * Cp // VIF
)
{
// integral/convolution
double PS;
if(kt>=fp) {
PS = 10e8;
} else {
PS = fp / ((fp / kt) - 1);
}
double convFunc = 0;
double Tp = vp / (PS + fp);
double Te = ve / PS;
double Tb = vp / fp;
double Kpos = 0.5 * (1/Tp + 1/Te + sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
double Kneg = 0.5 * (1/Tp + 1/Te - sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
double Eneg = (Kpos - 1/Tb) / (Kpos - Kneg);
for (int i = 1; i <= point_index; i++) {
double spacing = T[i] - T[i - 1];
double Ct = Cp[i] * (exp(-(T[point_index] - T[i]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i]) * Kneg) - exp(-Kpos)));//(p2 * exp(-(T[point_index] - T[i])/Tp) + p0 * (1 - exp(-(T[point_index] - T[i])/Tp)));
double Ctprev = Cp[i - 1] * (exp(-(T[point_index] - T[i-1]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i-1]) * Kneg) - exp(-Kpos))); //(p2 * exp(-(T[point_index] - T[i-1])/Tp) + p0 * (1 - exp(-(T[point_index] - T[i-1])/Tp)));
convFunc += ((Ct + Ctprev) / 2 * spacing);
}
double function_value = fp * convFunc;
return function_value;
}
/* The gateway function */
/* The gateway function: C = model_2cxm_mex(time, VIF, vp, ve, kt, fp)
 *
 * Dispatches on the class of the VIF argument (prhs[1]): double or
 * single; all six inputs must share that class.  Output C is N-by-Nt
 * (one row per parameter set, one column per time point).
 *
 * Fixes relative to the original:
 *  - the double-precision branch passed vef[n] — the *float* ve array,
 *    which is never initialized on that path — instead of ved[n];
 *  - "#pragma omp parallel for private(n,t)" named n and t before any
 *    declaration, a compile error when OpenMP is enabled; the loop
 *    variables declared in the for statements are implicitly private,
 *    so no clause is needed;
 *  - all data pointers are NULL-initialized so each branch only ever
 *    touches its own set.
 */
void mexFunction( int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray *prhs[])
{
    size_t N, Nt;
    mxClassID precision;
    float *Cpf = NULL, *timef = NULL, *vpf = NULL, *vef = NULL, *ktf = NULL, *fpf = NULL, *Cf = NULL;
    double *Cpd = NULL, *timed = NULL, *vpd = NULL, *ved = NULL, *ktd = NULL, *fpd = NULL, *Cd = NULL;
    /* check for proper number of arguments */
    if(nrhs!=6) {
        mexErrMsgIdAndTxt("STARDCE:model_standard:nrhs","Six inputs required: time, VIF, vp, ve, kt, fp.");
    }
    if(nlhs!=1) {
        mexErrMsgIdAndTxt("STARDCE:model_standard:nlhs","One output required.");
    }
    /* read all inputs */
    Nt = mxGetN(prhs[0]);   /* number of time points */
    N = mxGetN(prhs[2]);    /* number of parameter sets (voxels) */
    if (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) {
        precision = mxDOUBLE_CLASS;
#ifdef MATLAB2015
        /* try to be backward compatible */
        timed = mxGetPr(prhs[0]);
        Cpd = mxGetPr(prhs[1]);
        vpd = mxGetPr(prhs[2]);
        ved = mxGetPr(prhs[3]);
        ktd = mxGetPr(prhs[4]);
        fpd = mxGetPr(prhs[5]);
#else
        timed = mxGetDoubles(prhs[0]);
        Cpd = mxGetDoubles(prhs[1]);
        vpd = mxGetDoubles(prhs[2]);
        ved = mxGetDoubles(prhs[3]);
        ktd = mxGetDoubles(prhs[4]);
        fpd = mxGetDoubles(prhs[5]);
#endif
    }
    else {
        precision = mxSINGLE_CLASS;
#ifdef MATLAB2015
        /* try to be backward compatible */
        timef = mxGetData(prhs[0]);
        Cpf = mxGetData(prhs[1]);
        vpf = mxGetData(prhs[2]);
        vef = mxGetData(prhs[3]);
        ktf = mxGetData(prhs[4]);
        fpf = mxGetData(prhs[5]);
#else
        timef = mxGetSingles(prhs[0]);
        Cpf = mxGetSingles(prhs[1]);
        vpf = mxGetSingles(prhs[2]);
        vef = mxGetSingles(prhs[3]);
        ktf = mxGetSingles(prhs[4]);
        fpf = mxGetSingles(prhs[5]);
#endif
    }
    /* output concentration */
    plhs[0] = mxCreateNumericMatrix(N, Nt, precision, mxREAL);
#ifdef __GNU__
    /* Set number of threads.
       NOTE(review): __GNU__ is not a standard predefined macro; _OPENMP
       is the usual guard — confirm the build flags. */
    omp_set_num_threads(MAXCORES);
#endif
    if (precision == mxDOUBLE_CLASS){
#ifdef MATLAB2015
        Cd = mxGetPr(plhs[0]);
#else
        Cd = mxGetDoubles(plhs[0]);
#endif
        #pragma omp parallel for
        for (int n = 0; n < (int)N; n++) {
            for (int t = 0; t < (int)Nt; t++) {
                /* was vef[n]: the float buffer, uninitialized on this path */
                Cd[(size_t)t * N + n] = dce_2cxm_value_double(vpd[n], ved[n], ktd[n], fpd[n], t, timed, Cpd);
            }
        }
    } else {
#ifdef MATLAB2015
        Cf = mxGetData(plhs[0]);
#else
        Cf = mxGetSingles(plhs[0]);
#endif
        #pragma omp parallel for
        for (int n = 0; n < (int)N; n++) {
            for (int t = 0; t < (int)Nt; t++) {
                Cf[(size_t)t * N + n] = dce_2cxm_value_float(vpf[n], vef[n], ktf[n], fpf[n], t, timef, Cpf);
            }
        }
    }
}
|
nvector_openmp.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMP(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
  /* v is unused: every vector from this module has the same ID */
  return SUNDIALS_NVEC_OPENMP;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
/* Allocate an OpenMP vector shell: ops table and content struct are set
   up, but no data array is attached (content->data == NULL, not owned).
   Returns NULL on allocation failure. */
N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads)
{
  N_Vector v;
  N_VectorContent_OpenMP content;
  /* Create vector */
  v = NULL;
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);
  /* Attach operations */
  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid     = N_VGetVectorID_OpenMP;
  v->ops->nvclone           = N_VClone_OpenMP;
  v->ops->nvcloneempty      = N_VCloneEmpty_OpenMP;
  v->ops->nvdestroy         = N_VDestroy_OpenMP;
  v->ops->nvspace           = N_VSpace_OpenMP;
  v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
  v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
  v->ops->nvgetlength       = N_VGetLength_OpenMP;
  /* standard vector operations */
  v->ops->nvlinearsum    = N_VLinearSum_OpenMP;
  v->ops->nvconst        = N_VConst_OpenMP;
  v->ops->nvprod         = N_VProd_OpenMP;
  v->ops->nvdiv          = N_VDiv_OpenMP;
  v->ops->nvscale        = N_VScale_OpenMP;
  v->ops->nvabs          = N_VAbs_OpenMP;
  v->ops->nvinv          = N_VInv_OpenMP;
  v->ops->nvaddconst     = N_VAddConst_OpenMP;
  v->ops->nvdotprod      = N_VDotProd_OpenMP;
  v->ops->nvmaxnorm      = N_VMaxNorm_OpenMP;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
  v->ops->nvwrmsnorm     = N_VWrmsNorm_OpenMP;
  v->ops->nvmin          = N_VMin_OpenMP;
  v->ops->nvwl2norm      = N_VWL2Norm_OpenMP;
  v->ops->nvl1norm       = N_VL1Norm_OpenMP;
  v->ops->nvcompare      = N_VCompare_OpenMP;
  v->ops->nvinvtest      = N_VInvTest_OpenMP;
  v->ops->nvconstrmask   = N_VConstrMask_OpenMP;
  v->ops->nvminquotient  = N_VMinQuotient_OpenMP;
  /* fused and vector array operations are disabled (NULL) by default */
  /* local reduction kernels: shared-memory vector, so the "local" kernels
     are just the global ones */
  v->ops->nvdotprodlocal     = N_VDotProd_OpenMP;
  v->ops->nvmaxnormlocal     = N_VMaxNorm_OpenMP;
  v->ops->nvminlocal         = N_VMin_OpenMP;
  v->ops->nvl1normlocal      = N_VL1Norm_OpenMP;
  v->ops->nvinvtestlocal     = N_VInvTest_OpenMP;
  v->ops->nvconstrmasklocal  = N_VConstrMask_OpenMP;
  v->ops->nvminquotientlocal = N_VMinQuotient_OpenMP;
  v->ops->nvwsqrsumlocal     = N_VWSqrSumLocal_OpenMP;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP;
  /* XBraid interface operations */
  v->ops->nvbufsize   = N_VBufSize_OpenMP;
  v->ops->nvbufpack   = N_VBufPack_OpenMP;
  v->ops->nvbufunpack = N_VBufUnpack_OpenMP;
  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }
  /* Attach content */
  v->content = content;
  /* Initialize content */
  content->length      = length;
  content->num_threads = num_threads;
  content->own_data    = SUNFALSE;   /* no data array yet */
  content->data        = NULL;
  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
/* Create a fully-allocated OpenMP vector: an empty shell plus an owned
   data array of `length` realtype entries.  No array is attached when
   length == 0.  Returns NULL on any allocation failure. */
N_Vector N_VNew_OpenMP(sunindextype length, int num_threads)
{
  N_Vector vec = N_VNewEmpty_OpenMP(length, num_threads);
  if (vec == NULL) return(NULL);
  if (length <= 0) return(vec);
  realtype *buf = (realtype *) malloc(length * sizeof(realtype));
  if (buf == NULL) {
    N_VDestroy_OpenMP(vec);
    return(NULL);
  }
  NV_OWN_DATA_OMP(vec) = SUNTRUE;  /* destructor must free buf */
  NV_DATA_OMP(vec)     = buf;
  return(vec);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
/* Create an OpenMP vector wrapping a caller-supplied data array.  The
   vector does NOT take ownership: the caller must keep v_data alive and
   free it.  Returns NULL on allocation failure. */
N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads)
{
  N_Vector vec = N_VNewEmpty_OpenMP(length, num_threads);
  if (vec == NULL) return(NULL);
  if (length > 0) {
    NV_OWN_DATA_OMP(vec) = SUNFALSE;  /* caller retains ownership */
    NV_DATA_OMP(vec)     = v_data;
  }
  return(vec);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
/* Create an array of `count` clones of w (each with its own data array).
 *
 * Bug fix: on failure at index j, vectors 0..j-1 were successfully
 * created, but the cleanup passed j-1 as the count, leaking vs[j-1].
 * The destroy call now receives j so every created vector is freed.
 */
N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
  N_Vector* vs;
  int j;
  if (count <= 0) return(NULL);
  vs = NULL;
  vs = (N_Vector*) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);
  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VClone_OpenMP(w);
    if (vs[j] == NULL) {
      /* free the j vectors already created (indices 0..j-1) */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }
  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
/* Create an array of `count` empty clones of w (no data arrays attached).
 *
 * Bug fix: on failure at index j, the cleanup passed j-1 as the count,
 * leaking vs[j-1]; it now passes j so all created vectors are freed.
 */
N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
  N_Vector* vs;
  int j;
  if (count <= 0) return(NULL);
  vs = NULL;
  vs = (N_Vector*) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);
  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VCloneEmpty_OpenMP(w);
    if (vs[j] == NULL) {
      /* free the j vectors already created (indices 0..j-1) */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }
  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
/* Destroy the first `count` vectors of vs, then the array itself. */
void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count)
{
  for (int j = 0; j < count; j++)
    N_VDestroy_OpenMP(vs[j]);
  free(vs);
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMP(N_Vector v)
{
  /* length lives in the content struct; NV_LENGTH_OMP reads it */
  return NV_LENGTH_OMP(v);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMP(N_Vector x)
{
  /* convenience wrapper: print to stdout */
  N_VPrintFile_OpenMP(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
/* Print one element per line to outfile, with a precision-appropriate
   format chosen at compile time. */
void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile)
{
  sunindextype i, N;
  realtype *xd;
  xd = NULL;
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#else
    /* single precision uses the same format as double here */
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#endif
  }
  STAN_SUNDIALS_FPRINTF(outfile, "\n");
  return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
/* Create a vector with the same ops table, length, and thread count as
   w, but with no data array attached.  Returns NULL if w is NULL or on
   allocation failure. */
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
  if (w == NULL) return(NULL);
  N_Vector vec = N_VNewEmpty();
  if (vec == NULL) return(NULL);
  /* copy the full ops table from the template vector */
  if (N_VCopyOps(w, vec)) {
    N_VDestroy(vec);
    return(NULL);
  }
  N_VectorContent_OpenMP content =
    (N_VectorContent_OpenMP) malloc(sizeof *content);
  if (content == NULL) {
    N_VDestroy(vec);
    return(NULL);
  }
  vec->content = content;
  content->length      = NV_LENGTH_OMP(w);
  content->num_threads = NV_NUM_THREADS_OMP(w);
  content->own_data    = SUNFALSE;  /* no data yet */
  content->data        = NULL;
  return(vec);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
/* Clone w's structure and allocate a fresh (uninitialized) data array
   of the same length.  The clone owns its array.  Returns NULL on
   failure. */
N_Vector N_VClone_OpenMP(N_Vector w)
{
  N_Vector vec = N_VCloneEmpty_OpenMP(w);
  if (vec == NULL) return(NULL);
  sunindextype len = NV_LENGTH_OMP(w);
  if (len <= 0) return(vec);
  realtype *buf = (realtype *) malloc(len * sizeof(realtype));
  if (buf == NULL) {
    N_VDestroy_OpenMP(vec);
    return(NULL);
  }
  NV_OWN_DATA_OMP(vec) = SUNTRUE;  /* clone owns its storage */
  NV_DATA_OMP(vec)     = buf;
  return(vec);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
/* Free a vector: its data array (only when owned), its content struct,
   its ops table, and the vector itself.  NULL is a no-op. */
void N_VDestroy_OpenMP(N_Vector v)
{
  if (v == NULL) return;
  if (v->content != NULL) {
    /* release the data array only if this vector owns it */
    if (NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) {
      free(NV_DATA_OMP(v));
      NV_DATA_OMP(v) = NULL;
    }
    free(v->content);
    v->content = NULL;
  }
  if (v->ops != NULL) {
    free(v->ops);
    v->ops = NULL;
  }
  free(v);
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  *lrw = NV_LENGTH_OMP(v);  /* realtype words: the data array */
  *liw = 1;                 /* integer words: the length field */
  return;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
  /* direct access to the underlying data array (may be NULL) */
  return((realtype *) NV_DATA_OMP(v));
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
  /* only attach for non-empty vectors; ownership flag is not changed */
  if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data;
  return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
/* z = a*x + b*y.  Dispatches to specialized kernels for the common
   coefficient patterns (axpy, sum, diff, scaled variants); only the
   fully general case runs the loop at the bottom.  The special-case
   tests must stay in this order: the aliasing (axpy) checks come first
   because they also cover coefficient patterns matched below. */
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd, *yd, *zd;
  N_Vector v1, v2;
  booleantype test;
  i  = 0; /* initialize to suppress clang warning */
  xd = yd = zd = NULL;
  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMP(a,x,y);
    return;
  }
  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMP(b,y,x);
    return;
  }
  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMP(x, y, z);
    return;
  }
  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;   /* v1 is the negated operand */
    v2 = test ? x : y;
    VDiff_OpenMP(v2, v1, z);
    return;
  }
  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMP(c, v1, v2, z);
    return;
  }
  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMP(c, v1, v2, z);
    return;
  }
  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMP(a, x, y, z);
    return;
  }
  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMP(a, x, y, z);
    return;
  }
  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = (a*xd[i])+(b*yd[i]);
  return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
/* Set every element of z to the constant c. */
void N_VConst_OpenMP(realtype c, N_Vector z)
{
  sunindextype i, N;
  realtype *zd;
  i  = 0; /* initialize to suppress clang warning */
  zd = NULL;
  N  = NV_LENGTH_OMP(z);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(z))
  for (i = 0; i < N; i++) zd[i] = c;
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
/* Componentwise product z[i] = x[i]*y[i]; x, y, z may alias. */
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *yd, *zd;
  i  = 0; /* initialize to suppress clang warning */
  xd = yd = zd = NULL;
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]*yd[i];
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
/* Componentwise quotient z[i] = x[i]/y[i].  No zero check: the caller
   must guarantee y has no zero entries (standard N_Vector contract). */
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *yd, *zd;
  i  = 0; /* initialize to suppress clang warning */
  xd = yd = zd = NULL;
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]/yd[i];
  return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
/* z = c*x, with fast paths for in-place scaling (z == x), copy (c == 1)
   and negation (c == -1). */
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;
  i  = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;
  if (z == x) {  /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMP(c, x);
    return;
  }
  if (c == ONE) {
    VCopy_OpenMP(x, z);        /* plain copy */
  } else if (c == -ONE) {
    VNeg_OpenMP(x, z);         /* negate */
  } else {
    N  = NV_LENGTH_OMP(x);
    xd = NV_DATA_OMP(x);
    zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++)
      zd[i] = c*xd[i];
  }
  return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
/* Componentwise absolute value z[i] = |x[i]|.
 *
 * Consistency fix: this was the only kernel in the module whose pragma
 * omitted the "default(none) private(i) shared(...)" data-sharing
 * clauses used by every sibling kernel; they are added here (behavior
 * is unchanged — all referenced variables are listed).
 */
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;
  i  = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = SUNRabs(xd[i]);
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
/* Componentwise reciprocal z[i] = 1/x[i].  No zero check: callers that
   need one use N_VInvTest instead. */
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;
  i  = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = ONE/xd[i];
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
/* Add the scalar b to every element: z[i] = x[i] + b. */
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;
  i  = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]+b;
  return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
/* Dot product sum(x[i]*y[i]) via an OpenMP reduction.  Note: the
   floating-point summation order depends on the thread count, so the
   last bits of the result may vary between runs with different
   num_threads. */
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd, *yd;
  i   = 0; /* initialize to suppress clang warning */
  sum = ZERO;
  xd  = yd = NULL;
  N   = NV_LENGTH_OMP(x);
  xd  = NV_DATA_OMP(x);
  yd  = NV_DATA_OMP(y);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += xd[i]*yd[i];
  }
  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
/* Max norm: max_i |x[i]|.  Each thread computes a private maximum tmax
   over its static chunk, then the per-thread maxima are merged under a
   critical section (no max-reduction used, for portability to older
   OpenMP implementations). */
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype tmax, max, *xd;
  i   = 0; /* initialize to suppress clang warning */
  max = ZERO;
  xd  = NULL;
  N   = NV_LENGTH_OMP(x);
  xd  = NV_DATA_OMP(x);
#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmax = ZERO;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
    }
    /* merge this thread's maximum into the shared result */
#pragma omp critical
    {
      if (tmax > max)
        max = tmax;
    }
  }
  return(max);
}
/* ----------------------------------------------------------------------------
 * Weighted root-mean-square norm: sqrt( (1/N) * sum (x[i]*w[i])^2 )
 */

realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
  realtype sqrsum = N_VWSqrSumLocal_OpenMP(x, w);
  return(SUNRsqrt(sqrsum/(NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
 * Weighted root-mean-square norm restricted to components where id[i] > 0
 */

realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  realtype sqrsum = N_VWSqrSumMaskLocal_OpenMP(x, w, id);
  return(SUNRsqrt(sqrsum/(NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
 * Finds the minimum component of a vector
 */

realtype N_VMin_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd;
  realtype tmin;

  i = 0; /* initialize to suppress clang warning */
  xd = NULL;

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /* seed the shared result with the first component */
  min = xd[0];

  /* Each thread finds a private minimum (tmin); the per-thread minima are
     merged under a critical section.  The outer "if (tmin < min)" is an
     unsynchronized pre-check that merely skips the critical section when a
     thread clearly cannot improve the result; the comparison is repeated
     inside the critical section before min is updated. */
#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmin = xd[0];
#pragma omp for schedule(static)
    for (i = 1; i < N; i++) {
      if (xd[i] < tmin) tmin = xd[i];
    }
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}
/* ----------------------------------------------------------------------------
 * Weighted Euclidean (L2) norm: sqrt( sum (x[i]*w[i])^2 )
 */

realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype k, len;
  realtype acc;
  realtype *xv, *wv;

  k = 0; /* quiet clang uninitialized-use warning */
  acc = ZERO;
  xv = wv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  wv  = NV_DATA_OMP(w);

#pragma omp parallel for default(none) private(k) shared(len,xv,wv) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += SUNSQR(xv[k] * wv[k]);
  }

  return(SUNRsqrt(acc));
}
/* ----------------------------------------------------------------------------
 * L1 norm of a vector: sum over i of |x[i]|
 */

realtype N_VL1Norm_OpenMP(N_Vector x)
{
  sunindextype k, len;
  realtype acc;
  realtype *xv;

  k = 0; /* quiet clang uninitialized-use warning */
  acc = ZERO;
  xv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(k) shared(len,xv) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += SUNRabs(xv[k]);
  }

  return(acc);
}
/* ----------------------------------------------------------------------------
 * Compare vector component magnitudes to a scalar:
 * z[i] = 1 if |x[i]| >= c, else 0
 */

void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *in, *out;

  k = 0; /* quiet clang uninitialized-use warning */
  in = out = NULL;

  len = NV_LENGTH_OMP(x);
  in  = NV_DATA_OMP(x);
  out = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,c,in,out) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    out[k] = (SUNRabs(in[k]) >= c) ? ONE : ZERO;
  }
}
/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i], checking for x[i] == ZERO.
 * Returns SUNTRUE if no component of x is zero, SUNFALSE otherwise.  Entries
 * of z corresponding to zero entries of x are left unmodified.
 */

booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd, val;

  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  /* val counts the zero entries encountered.  The original code wrote to a
     plain shared val from multiple threads without synchronization -- a data
     race (undefined behavior under the OpenMP memory model); an OpenMP
     reduction produces the same SUNFALSE/SUNTRUE outcome race-free. */
  val = ZERO;
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) \
  reduction(+:val) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (xd[i] == ZERO)
      val += ONE;
    else
      zd[i] = ONE/xd[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
 * Compute the constraint mask of a vector.  For each component, m[i] is set
 * to 1 if the constraint encoded in c[i] is violated by x[i], else 0.
 * Returns SUNFALSE if any constraint was violated, SUNTRUE otherwise.
 */

booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd, *xd, *md;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  cd = xd = md = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  cd = NV_DATA_OMP(c);
  md = NV_DATA_OMP(m);

  /* temp counts violations.  The original code wrote ONE into a plain shared
     temp from multiple threads (the comment even noted the race); an OpenMP
     reduction gives the same violated/not-violated answer without the race. */
  temp = ZERO;
#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md) \
  reduction(+:temp) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    md[i] = ZERO;

    /* Continue if no constraints were set for the variable */
    if (cd[i] == ZERO)
      continue;

    /* Check if a set constraint has been violated:
       |c|=2 means sign(x) must match sign(c) and x != 0;
       |c|=1 means sign(x) must not oppose sign(c). */
    test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) ||
           (SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO);
    if (test) {
      md[i] = ONE;
      temp += ONE; /* accumulated via reduction; no race */
    }
  }

  /* Return false if any constraint was violated */
  return (temp > ZERO) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
 * Compute the minimum componentwise quotient: min over i of num[i]/denom[i],
 * skipping components where denom[i] == 0.  Returns BIG_REAL when every
 * denominator entry is zero.
 */

realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd, *dd, min, tmin, val;

  i = 0; /* initialize to suppress clang warning */
  nd = dd = NULL;

  N  = NV_LENGTH_OMP(num);
  nd = NV_DATA_OMP(num);
  dd = NV_DATA_OMP(denom);

  min = BIG_REAL;

  /* Each thread computes a private minimum quotient (tmin); the per-thread
     minima are merged under a critical section.  The outer unsynchronized
     "if (tmin < min)" only avoids entering the critical section when this
     thread clearly cannot improve the result -- the comparison is repeated
     inside the critical section before min is updated. */
#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
  num_threads(NV_NUM_THREADS_OMP(num))
  {
    tmin = BIG_REAL;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (dd[i] != ZERO) {
        val = nd[i]/dd[i];
        if (val < tmin) tmin = val;
      }
    }
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}
/* ----------------------------------------------------------------------------
 * Weighted squared sum of a vector: sum over i of (x[i]*w[i])^2
 */

realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype k, len;
  realtype acc;
  realtype *xv, *wv;

  k = 0; /* quiet clang uninitialized-use warning */
  acc = ZERO;
  xv = wv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  wv  = NV_DATA_OMP(w);

#pragma omp parallel for default(none) private(k) shared(len,xv,wv) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += SUNSQR(xv[k] * wv[k]);
  }

  return(acc);
}
/* ----------------------------------------------------------------------------
 * Weighted squared sum of the components of x where id[i] > 0:
 * sum of (x[i]*w[i])^2 over the unmasked components
 */

realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype k, len;
  realtype acc;
  realtype *xv, *wv, *mask;

  k = 0; /* quiet clang uninitialized-use warning */
  acc = ZERO;
  xv = wv = mask = NULL;

  len  = NV_LENGTH_OMP(x);
  xv   = NV_DATA_OMP(x);
  wv   = NV_DATA_OMP(w);
  mask = NV_DATA_OMP(id);

#pragma omp parallel for default(none) private(k) shared(len,xv,wv,mask) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    if (mask[k] > ZERO) {
      acc += SUNSQR(xv[k] * wv[k]);
    }
  }

  return(acc);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/* Compute the linear combination z = sum_{i=0}^{nvec-1} c[i]*X[i].
 * Returns 0 on success, -1 for an invalid number of vectors.  Small cases
 * are delegated to the cheaper single/pair kernels; aliasing of X[0] with z
 * is handled specially so the accumulation can be done in place. */
int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i;
  sunindextype j, N;
  realtype* zd=NULL;
  realtype* xd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMP(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMP(z);
  zd = NV_DATA_OMP(z);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
    /* The outer loop over vectors is executed redundantly by every thread so
       that each inner "omp for" binds to the single enclosing parallel
       region; only the inner element loop is work-shared. */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
    {
      for (i=1; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          zd[j] += c[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
    {
      /* scale the aliased output in place first */
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] *= c[0];
      }
      for (i=1; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          zd[j] += c[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
  {
    /* initialize the output with the first scaled vector */
    xd = NV_DATA_OMP(X[0]);
#pragma omp for schedule(static)
    for (j=0; j<N; j++) {
      zd[j] = c[0] * xd[j];
    }
    for (i=1; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] += c[i] * xd[j];
      }
    }
  }
  return(0);
}
/* For each of the nvec output vectors compute Z[i] = a[i]*x + Y[i].
 * Returns 0 on success, -1 for an invalid number of vectors.  When Y and Z
 * are the same array the update is done in place. */
int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
    /* outer vector loop runs on every thread; only the inner element loop
       is work-shared across the team */
#pragma omp parallel default(none) private(i,j,yd) shared(nvec,Y,N,a,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
    {
      for (i=0; i<nvec; i++) {
        yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          yd[j] += a[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp parallel default(none) private(i,j,yd,zd) shared(nvec,Y,Z,N,a,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    for (i=0; i<nvec; i++) {
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a[i] * xd[j] + yd[j];
      }
    }
  }
  return(0);
}
/* Compute the dot product of x with each of the nvec vectors in Y, storing
 * the results in dotprods.  Returns 0 on success, -1 for an invalid number
 * of vectors. */
int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* xd=NULL;
  realtype* yd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMP(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /* initialize dot products (serially, before the parallel region) */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* compute multiple dot products: each thread accumulates a private
     partial sum over its share of the elements, then adds it into the
     shared dotprods[i] inside a critical section */
#pragma omp parallel default(none) private(i,j,yd,sum) shared(nvec,Y,N,xd,dotprods) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    for (i=0; i<nvec; i++) {
      yd = NV_DATA_OMP(Y[i]);
      sum = ZERO;
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        sum += xd[j] * yd[j];
      }
#pragma omp critical
      {
        dotprods[i] += sum;
      }
    }
  }
  return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/* For each vector pair compute Z[i] = a*X[i] + b*Y[i].
 * Returns 0 on success, -1 for an invalid number of vectors.  The many
 * special cases dispatch to cheaper helper kernels for common coefficient
 * combinations (axpy, sum, difference, single-scaled forms). */
int N_VLinearSumVectorArray_OpenMP(int nvec,
                                   realtype a, N_Vector* X,
                                   realtype b, N_Vector* Y,
                                   N_Vector* Z)
{
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;
  realtype c;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMP(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMP(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMP(nvec, X, Y, Z));

  /* Cases: */
  /* (1) a == 1.0, b = -1.0, */
  /* (2) a == -1.0, b == 1.0 */
  /* (note: the assignment inside the condition records which case applies) */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMP(nvec, V2, V1, Z));
  }

  /* Cases: */
  /* (1) a == 1.0, b == other or 0.0, */
  /* (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Cases: */
  /* (1) a == -1.0, b != 1.0, */
  /* (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Do all cases not handled above: */
  /* (1) a == other, b == 0.0 - user should have called N_VScale */
  /* (2) a == 0.0, b == other - user should have called N_VScale */
  /* (3) a,b == other, a !=b, a != -b */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /* compute linear sum for each vector pair in vector arrays; the outer
     vector loop runs on all threads, only the inner element loop is
     work-shared */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a,b) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a * xd[j] + b * yd[j];
      }
    }
  }
  return(0);
}
/* Scale each vector in the array: Z[i] = c[i] * X[i].
 * Returns 0 on success, -1 for an invalid number of vectors.  When X and Z
 * alias the same array, the scaling is done in place. */
int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* zd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMP(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /*
   * X[i] *= c[i]
   */
  if (X == Z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (i=0; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          xd[j] *= c[i];
        }
      }
    }
    return(0);
  }

  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp parallel default(none) private(i,j,xd,zd) shared(nvec,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = c[i] * xd[j];
      }
    }
  }
  return(0);
}
/* Set every component of every vector in the array to the constant c.
 * Returns 0 on success, -1 for an invalid number of vectors. */
int N_VConstVectorArray_OpenMP(int nvec, realtype c, N_Vector* Z)
{
  int v;
  sunindextype k, len;
  realtype* zv=NULL;

  v = 0; /* quiet clang uninitialized-use warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* single vector: delegate to the scalar kernel */
  if (nvec == 1) {
    N_VConst_OpenMP(c, Z[0]);
    return(0);
  }

  len = NV_LENGTH_OMP(Z[0]);

  /* fill each vector; only the inner element loop is work-shared */
#pragma omp parallel default(none) private(v,k,zv) shared(nvec,Z,len,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (v=0; v<nvec; v++) {
      zv = NV_DATA_OMP(Z[v]);
#pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = c;
      }
    }
  }

  return(0);
}
/* Compute the WRMS norm of each vector in X with weights W, storing results
 * in nrm.  Returns 0 on success, -1 for an invalid number of vectors. */
int N_VWrmsNormVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* wd=NULL;
  realtype* xd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMP(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMP(X[0]);

  /* initialize norms (serially, before the parallel region) */
  for (i=0; i<nvec; i++) {
    nrm[i] = ZERO;
  }

  /* accumulate the weighted squared sums: each thread adds its private
     partial sum into nrm[i] inside a critical section */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,nrm) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      wd = NV_DATA_OMP(W[i]);
      sum = ZERO;
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        sum += SUNSQR(xd[j] * wd[j]);
      }
#pragma omp critical
      {
        nrm[i] += sum;
      }
    }
  }

  /* finish the norms: sqrt of the mean squared weighted value */
  for (i=0; i<nvec; i++) {
    nrm[i] = SUNRsqrt(nrm[i]/N);
  }
  return(0);
}
/* Compute the masked WRMS norm of each vector in X with weights W, counting
 * only components where id[j] > 0; results are stored in nrm.
 * Returns 0 on success, -1 for an invalid number of vectors. */
int N_VWrmsNormMaskVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W,
                                      N_Vector id, realtype* nrm)
{
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* wd=NULL;
  realtype* xd=NULL;
  realtype* idd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMP(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N = NV_LENGTH_OMP(X[0]);
  idd = NV_DATA_OMP(id);

  /* initialize norms (serially, before the parallel region) */
  for (i=0; i<nvec; i++) {
    nrm[i] = ZERO;
  }

  /* accumulate the masked weighted squared sums: each thread adds its
     private partial sum into nrm[i] inside a critical section */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,idd,nrm) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      wd = NV_DATA_OMP(W[i]);
      sum = ZERO;
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        if (idd[j] > ZERO)
          sum += SUNSQR(xd[j] * wd[j]);
      }
#pragma omp critical
      {
        nrm[i] += sum;
      }
    }
  }

  /* finish the norms: sqrt of the mean squared weighted value (note the
     divisor is the full length N, not the number of unmasked components) */
  for (i=0; i<nvec; i++) {
    nrm[i] = SUNRsqrt(nrm[i]/N);
  }
  return(0);
}
/* For each vector i and each sum term j compute
 * Z[j][i] = a[j]*X[i] + Y[j][i].
 * Returns 0 on success, -1 for an invalid number of vectors or on memory
 * allocation failure.  When Y and Z alias, the update is done in place. */
int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a,
                                       N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  int i, j;
  sunindextype k, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;
  int retval;
  N_Vector* YY;
  N_Vector* ZZ;

  i = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMP(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti */
    YY = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    /* check allocations before use; the original code dereferenced the
       results unconditionally.  free(NULL) is a no-op, so both pointers can
       be released on either failure. */
    if (YY == NULL || ZZ == NULL) {
      free(YY);
      free(ZZ);
      return(-1);
    }

    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }

    retval = N_VScaleAddMulti_OpenMP(nsum, a, X[0], YY, ZZ);

    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMP(X[0]);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
    /* outer vector/sum loops run on every thread; only the innermost
       element loop is work-shared across the team */
#pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (i=0; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
        for (j=0; j<nsum; j++) {
          yd = NV_DATA_OMP(Y[j][i]);
#pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            yd[k] += a[j] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp parallel default(none) private(i,j,k,xd,yd,zd) shared(nvec,nsum,X,Y,Z,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      for (j=0; j<nsum; j++) {
        yd = NV_DATA_OMP(Y[j][i]);
        zd = NV_DATA_OMP(Z[j][i]);
#pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] = a[j] * xd[k] + yd[k];
        }
      }
    }
  }
  return(0);
}
/* For each vector j compute the linear combination
 * Z[j] = sum_{i=0}^{nsum-1} c[i]*X[i][j].
 * Returns 0 on success, -1 for an invalid number of vectors or on memory
 * allocation failure.  Aliasing of X[0] with Z is handled specially so the
 * accumulation can be done in place. */
int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum,
                                           realtype* c,
                                           N_Vector** X,
                                           N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array [0,nvec) */
  sunindextype k; /* element index in vector [0,N) */
  sunindextype N;
  realtype* zd=NULL;
  realtype* xd=NULL;
  realtype* ctmp;
  N_Vector* Y;

  i = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMP(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMP(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    /* check allocation before use (the original dereferenced it blindly) */
    if (Y == NULL) return(-1);

    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }

    N_VLinearCombination_OpenMP(nsum, c, Y, Z[0]);

    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    /* check allocation before use (the original dereferenced it blindly) */
    if (ctmp == NULL) return(-1);

    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }

    N_VScaleVectorArray_OpenMP(nvec, ctmp, X[0], Z);

    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMP(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
    /* outer loops run on every thread; only the innermost element loop is
       work-shared across the team */
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (j=0; j<nvec; j++) {
        zd = NV_DATA_OMP(Z[j]);
        for (i=1; i<nsum; i++) {
          xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            zd[k] += c[i] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1
   */
  if (X[0] == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (j=0; j<nvec; j++) {
        zd = NV_DATA_OMP(Z[j]);
        /* scale the aliased output in place first */
#pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] *= c[0];
        }
        for (i=1; i<nsum; i++) {
          xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            zd[k] += c[i] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1
   */
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd = NV_DATA_OMP(X[0][j]);
      zd = NV_DATA_OMP(Z[j]);
#pragma omp for schedule(static)
      for (k=0; k<N; k++) {
        zd[k] = c[0] * xd[k];
      }
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] += c[i] * xd[k];
        }
      }
    }
  }
  return(0);
}
/*
* -----------------------------------------------------------------
* OPTIONAL XBraid interface operations
* -----------------------------------------------------------------
*/
/* Report the number of bytes needed to pack the vector x into a flat
 * buffer.  Returns 0 on success, -1 if x is NULL. */
int N_VBufSize_OpenMP(N_Vector x, sunindextype *size)
{
  if (x == NULL) { return(-1); }
  *size = ((sunindextype)sizeof(realtype)) * NV_LENGTH_OMP(x);
  return(0);
}
/* Copy the data of vector x into the caller-supplied flat buffer buf.
 * Returns 0 on success, -1 if x or buf is NULL. */
int N_VBufPack_OpenMP(N_Vector x, void *buf)
{
  sunindextype i, N;
  realtype *xd = NULL;
  realtype *bd = NULL;

  if (x == NULL || buf == NULL) return(-1);

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  bd = (realtype*) buf;

  /* The original used an orphaned "#pragma omp for" with no enclosing
     parallel region, which executes serially; use "parallel for" with the
     same clauses as every other kernel in this file. */
#pragma omp parallel for default(none) private(i) shared(N,xd,bd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    bd[i] = xd[i];

  return(0);
}
/* Copy data from the caller-supplied flat buffer buf into vector x.
 * Returns 0 on success, -1 if x or buf is NULL. */
int N_VBufUnpack_OpenMP(N_Vector x, void *buf)
{
  sunindextype i, N;
  realtype *xd = NULL;
  realtype *bd = NULL;

  if (x == NULL || buf == NULL) return(-1);

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  bd = (realtype*) buf;

  /* The original used an orphaned "#pragma omp for" with no enclosing
     parallel region, which executes serially; use "parallel for" with the
     same clauses as every other kernel in this file. */
#pragma omp parallel for default(none) private(i) shared(N,xd,bd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    xd[i] = bd[i];

  return(0);
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * Copy vector components into a second vector: z[i] = x[i]
 */

static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;

  k = 0; /* quiet clang uninitialized-use warning */
  src = dst = NULL;

  len = NV_LENGTH_OMP(x);
  src = NV_DATA_OMP(x);
  dst = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,src,dst) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    dst[k] = src[k];
  }
}
/* ----------------------------------------------------------------------------
 * Componentwise vector sum: z[i] = x[i] + y[i]
 */

static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,xv,yv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = xv[k] + yv[k];
  }
}
/* ----------------------------------------------------------------------------
 * Componentwise vector difference: z[i] = x[i] - y[i]
 */

static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,xv,yv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = xv[k] - yv[k];
  }
}
/* ----------------------------------------------------------------------------
 * Componentwise negation: z[i] = -x[i]
 */

static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,xv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = -xv[k];
  }
}
/* ----------------------------------------------------------------------------
 * Scaled vector sum: z[i] = c*(x[i] + y[i])
 */

static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,c,xv,yv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = c*(xv[k] + yv[k]);
  }
}
/* ----------------------------------------------------------------------------
 * Scaled vector difference: z[i] = c*(x[i] - y[i])
 */

static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,c,xv,yv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = c*(xv[k] - yv[k]);
  }
}
/* ----------------------------------------------------------------------------
 * Scaled sum: z[i] = a*x[i] + y[i]
 */

static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,a,xv,yv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = (a*xv[k]) + yv[k];
  }
}
/* ----------------------------------------------------------------------------
 * Scaled difference: z[i] = a*x[i] - y[i]
 */

static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,a,xv,yv,zv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zv[k] = (a*xv[k]) - yv[k];
  }
}
/* ----------------------------------------------------------------------------
 * In-place axpy update y <- a*x + y, with specialized loops for the common
 * coefficients a == 1 and a == -1
 */

static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, len;
  realtype *xv, *yv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = yv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);

  if (a == ONE) {
#pragma omp parallel for default(none) private(k) shared(len,xv,yv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++) {
      yv[k] += xv[k];
    }
    return;
  }

  if (a == -ONE) {
#pragma omp parallel for default(none) private(k) shared(len,xv,yv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++) {
      yv[k] -= xv[k];
    }
    return;
  }

  /* general coefficient */
#pragma omp parallel for default(none) private(k) shared(len,a,xv,yv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    yv[k] += a*xv[k];
  }
}
/* ----------------------------------------------------------------------------
 * In-place scaling: x[i] = a*x[i]
 */

static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
  sunindextype k, len;
  realtype *xv;

  k = 0; /* quiet clang uninitialized-use warning */
  xv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(k) shared(len,a,xv) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    xv[k] *= a;
  }
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* Componentwise sum over a vector array: Z[i] = X[i] + Y[i] */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, len;
  realtype* xv=NULL;
  realtype* yv=NULL;
  realtype* zv=NULL;

  v = 0; /* quiet clang uninitialized-use warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  /* only the inner element loop is work-shared across the team */
#pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
#pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = xv[k] + yv[k];
      }
    }
  }

  return(0);
}
/* Componentwise difference over a vector array: Z[i] = X[i] - Y[i] */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, len;
  realtype* xv=NULL;
  realtype* yv=NULL;
  realtype* zv=NULL;

  v = 0; /* quiet clang uninitialized-use warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
#pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = xv[k] - yv[k];
      }
    }
  }

  return(0);
}
/* Scaled sum over a vector array: Z[i] = c*(X[i] + Y[i]) */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, len;
  realtype* xv=NULL;
  realtype* yv=NULL;
  realtype* zv=NULL;

  v = 0; /* quiet clang uninitialized-use warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,c) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
#pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = c * (xv[k] + yv[k]);
      }
    }
  }

  return(0);
}
/* Scaled difference over a vector array: Z[i] = c*(X[i] - Y[i]) */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, len;
  realtype* xv=NULL;
  realtype* yv=NULL;
  realtype* zv=NULL;

  v = 0; /* quiet clang uninitialized-use warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,c) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
#pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = c * (xv[k] - yv[k]);
      }
    }
  }

  return(0);
}
/* Scaled sum over a vector array: Z[i] = a*X[i] + Y[i] */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, len;
  realtype* xv=NULL;
  realtype* yv=NULL;
  realtype* zv=NULL;

  v = 0; /* quiet clang uninitialized-use warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
#pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = (a * xv[k]) + yv[k];
      }
    }
  }

  return(0);
}
/* Computes Z[k][j] = a * X[k][j] - Y[k][j] for k = 0..nvec-1.
   Always returns 0. */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int k;
  sunindextype idx, len;
  realtype* xa=NULL;
  realtype* ya=NULL;
  realtype* za=NULL;

  k = 0; /* initialize to suppress clang warning */
  idx = 0;

  /* All vectors are assumed to share the length of X[0]. */
  len = NV_LENGTH_OMP(X[0]);

  /* Single parallel region; the inner entry loop is work-shared. */
#pragma omp parallel default(none) private(k,idx,xa,ya,za) shared(nvec,X,Y,Z,len,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (k=0; k<nvec; k++) {
      xa = NV_DATA_OMP(X[k]);
      ya = NV_DATA_OMP(Y[k]);
      za = NV_DATA_OMP(Z[k]);
#pragma omp for schedule(static)
      for (idx=0; idx<len; idx++)
        za[idx] = (a * xa[idx]) - ya[idx];
    }
  }

  return(0);
}
/* Computes Y[k][j] += a * X[k][j] for k = 0..nvec-1, with specialized
   multiply-free loops for a == 1 and a == -1.  Always returns 0. */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int k;
  sunindextype idx, len;
  realtype* xa=NULL;
  realtype* ya=NULL;

  k = 0; /* initialize to suppress clang warning */
  idx = 0;

  /* All vectors are assumed to share the length of X[0]. */
  len = NV_LENGTH_OMP(X[0]);

  /* a == 1: plain accumulation, no multiply. */
  if (a == ONE) {
#pragma omp parallel default(none) private(k,idx,xa,ya) shared(nvec,X,Y,len,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (k=0; k<nvec; k++) {
        xa = NV_DATA_OMP(X[k]);
        ya = NV_DATA_OMP(Y[k]);
#pragma omp for schedule(static)
        for (idx=0; idx<len; idx++)
          ya[idx] += xa[idx];
      }
    }
    return(0);
  }

  /* a == -1: plain subtraction, no multiply. */
  if (a == -ONE) {
#pragma omp parallel default(none) private(k,idx,xa,ya) shared(nvec,X,Y,len,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (k=0; k<nvec; k++) {
        xa = NV_DATA_OMP(X[k]);
        ya = NV_DATA_OMP(Y[k]);
#pragma omp for schedule(static)
        for (idx=0; idx<len; idx++)
          ya[idx] -= xa[idx];
      }
    }
    return(0);
  }

  /* General coefficient. */
#pragma omp parallel default(none) private(k,idx,xa,ya) shared(nvec,X,Y,len,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (k=0; k<nvec; k++) {
      xa = NV_DATA_OMP(X[k]);
      ya = NV_DATA_OMP(Y[k]);
#pragma omp for schedule(static)
      for (idx=0; idx<len; idx++)
        ya[idx] += a * xa[idx];
    }
  }

  return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* Enables (tf nonzero) or disables (tf zero) every fused and
   vector-array operation on vector V at once.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Fused vector operations: install the OpenMP kernel or clear the slot. */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;
  v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMP     : NULL;
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMP      : NULL;

  /* Vector array operations. */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMP         : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMP             : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMP             : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMP          : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMP      : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMP     : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggles the fused linear-combination operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;

  return(0);
}
/* Toggles the fused scale-add-multi operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMP : NULL;

  return(0);
}
/* Toggles the fused multi-dot-product operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array linear-sum operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array scale operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array constant-fill operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array WRMS-norm operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array masked WRMS-norm operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array scale-add-multi operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMP : NULL;

  return(0);
}
/* Toggles the vector-array linear-combination operation on vector V.
   Returns 0 on success, -1 if V or its ops table is NULL. */
int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* Guard against a missing vector or ops table. */
  if (v == NULL || v->ops == NULL) return(-1);

  /* Install the OpenMP kernel when enabling, clear the slot otherwise. */
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;

  return(0);
}
|
decl2.c | /* Process declarations and variables for C++ compiler.
Copyright (C) 1988-2020 Free Software Foundation, Inc.
Hacked by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C++ front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "target.h"
#include "cp-tree.h"
#include "c-family/c-common.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "varasm.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "decl.h"
#include "toplev.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "dumpfile.h"
#include "intl.h"
#include "c-family/c-ada-spec.h"
#include "asan.h"
/* Id for dumping the raw trees. */
int raw_dump_id;
extern cpp_reader *parse_in;
/* This structure contains information about the initializations
and/or destructions required for a particular priority level. */
/* NOTE: priority_info is a POINTER typedef -- it names `struct
   priority_info_s *`, not the struct itself. */
typedef struct priority_info_s {
/* Nonzero if there have been any initializations at this priority
throughout the translation unit. */
int initializations_p;
/* Nonzero if there have been any destructions at this priority
throughout the translation unit. */
int destructions_p;
} *priority_info;
static tree start_objects (int, int);
static void finish_objects (int, int, tree);
static tree start_static_storage_duration_function (unsigned);
static void finish_static_storage_duration_function (tree);
static priority_info get_priority_info (int);
static void do_static_initialization_or_destruction (tree, bool);
static void one_static_initialization_or_destruction (tree, tree, bool);
static void generate_ctor_or_dtor_function (bool, int, location_t *);
static int generate_ctor_and_dtor_functions_for_priority (splay_tree_node,
void *);
static tree prune_vars_needing_no_initialization (tree *);
static void write_out_vars (tree);
static void import_export_class (tree);
static tree get_guard_bits (tree);
static void determine_visibility_from_class (tree, tree);
static bool determine_hidden_inline (tree);
/* A list of static class variables. This is needed, because a
static class variable can be declared inside the class without
an initializer, and then initialized, statically, outside the class. */
static GTY(()) vec<tree, va_gc> *pending_statics;
/* A list of functions which were declared inline, but which we
may need to emit outline anyway. */
static GTY(()) vec<tree, va_gc> *deferred_fns;
/* A list of decls that use types with no linkage, which we need to make
sure are defined. */
static GTY(()) vec<tree, va_gc> *no_linkage_decls;
/* A vector of alternating decls and identifiers, where the latter
is to be an alias for the former if the former is defined. */
static GTY(()) vec<tree, va_gc> *mangling_aliases;
/* hash traits for declarations. Hashes single decls via
DECL_ASSEMBLER_NAME_RAW. */
/* Hash traits used by the mangled_decls table below: entries are DECLs,
   hashed and compared via their raw assembler-name identifier, so a
   lookup by identifier finds the decl carrying that mangled name. */
struct mangled_decl_hash : ggc_remove <tree>
{
typedef tree value_type; /* A DECL. */
typedef tree compare_type; /* An identifier. */
/* Hash a decl by the precomputed hash of its assembler name. */
static hashval_t hash (const value_type decl)
{
return IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME_RAW (decl));
}
/* An entry matches when its assembler name is the candidate identifier
(pointer identity is sufficient for identifiers). */
static bool equal (const value_type existing, compare_type candidate)
{
tree name = DECL_ASSEMBLER_NAME_RAW (existing);
return candidate == name;
}
/* Empty slots are NULL_TREE; deleted slots use the sentinel value 1. */
static const bool empty_zero_p = true;
static inline void mark_empty (value_type &p) {p = NULL_TREE;}
static inline bool is_empty (value_type p) {return !p;}
static bool is_deleted (value_type e)
{
return e == reinterpret_cast <value_type> (1);
}
static void mark_deleted (value_type &e)
{
e = reinterpret_cast <value_type> (1);
}
};
/* A hash table of decls keyed by mangled name. Used to figure out if
we need compatibility aliases. */
static GTY(()) hash_table<mangled_decl_hash> *mangled_decls;
/* Nonzero if we're done parsing and into end-of-file activities. */
int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
bool defer_mangling_aliases = true;
/* Return a member function type (a METHOD_TYPE), given FNTYPE (a
FUNCTION_TYPE), CTYPE (class type), and QUALS (the cv-qualifiers
that apply to the function). */
/* Build a METHOD_TYPE for CTYPE from FNTYPE, applying QUALS and RQUAL;
   returns error_mark_node if either input is erroneous. */
tree
build_memfn_type (tree fntype, tree ctype, cp_cv_quals quals,
cp_ref_qualifier rqual)
{
if (fntype == error_mark_node || ctype == error_mark_node)
return error_mark_node;
gcc_assert (FUNC_OR_METHOD_TYPE_P (fntype));
/* Exclude restrict: only the remaining cv-qualifiers are applied to
the class type. */
cp_cv_quals type_quals = quals & ~TYPE_QUAL_RESTRICT;
ctype = cp_build_qualified_type (ctype, type_quals);
/* If FNTYPE is already a METHOD_TYPE, drop its first (implicit object)
parameter before rebuilding; otherwise use the arg list as-is. */
tree newtype
= build_method_type_directly (ctype, TREE_TYPE (fntype),
(TREE_CODE (fntype) == METHOD_TYPE
? TREE_CHAIN (TYPE_ARG_TYPES (fntype))
: TYPE_ARG_TYPES (fntype)));
/* Carry over attributes, ref-qualifier, exception spec and
late-return-type flag from the original type. */
if (tree attrs = TYPE_ATTRIBUTES (fntype))
newtype = cp_build_type_attribute_variant (newtype, attrs);
newtype = build_cp_fntype_variant (newtype, rqual,
TYPE_RAISES_EXCEPTIONS (fntype),
TYPE_HAS_LATE_RETURN_TYPE (fntype));
return newtype;
}
/* Return a variant of FNTYPE, a FUNCTION_TYPE or METHOD_TYPE, with its
return type changed to NEW_RET. */
/* Return a variant of FNTYPE whose return type is NEW_RET; FNTYPE is
   returned unchanged when NEW_RET is erroneous or already the return
   type. */
tree
change_return_type (tree new_ret, tree fntype)
{
if (new_ret == error_mark_node)
return fntype;
/* Nothing to do if the return type already matches. */
if (same_type_p (new_ret, TREE_TYPE (fntype)))
return fntype;
tree newtype;
tree args = TYPE_ARG_TYPES (fntype);
if (TREE_CODE (fntype) == FUNCTION_TYPE)
{
newtype = build_function_type (new_ret, args);
/* Re-apply any member-function cv-quals the original carried. */
newtype = apply_memfn_quals (newtype,
type_memfn_quals (fntype));
}
else
/* METHOD_TYPE: rebuild with the same class and remaining args
(TREE_CHAIN skips the implicit object parameter). */
newtype = build_method_type_directly
(class_of_this_parm (fntype), new_ret, TREE_CHAIN (args));
/* Preserve attributes and other C++ lang qualifiers. */
if (tree attrs = TYPE_ATTRIBUTES (fntype))
newtype = cp_build_type_attribute_variant (newtype, attrs);
newtype = cxx_copy_lang_qualifiers (newtype, fntype);
return newtype;
}
/* Build a PARM_DECL of FN with NAME and TYPE, and set DECL_ARG_TYPE
appropriately. */
/* Build a PARM_DECL named NAME of TYPE, with FN as its context. */
tree
cp_build_parm_decl (tree fn, tree name, tree type)
{
tree parm = build_decl (input_location,
PARM_DECL, name, type);
DECL_CONTEXT (parm) = fn;
/* DECL_ARG_TYPE is only used by the back end and the back end never
sees templates. */
if (!processing_template_decl)
DECL_ARG_TYPE (parm) = type_passed_as (type);
return parm;
}
/* Returns a PARM_DECL of FN for a parameter of the indicated TYPE, with the
indicated NAME. */
/* Build a compiler-generated (artificial, read-only) PARM_DECL of FN
   with the given NAME and TYPE. */
tree
build_artificial_parm (tree fn, tree name, tree type)
{
tree parm = cp_build_parm_decl (fn, name, type);
DECL_ARTIFICIAL (parm) = 1;
/* All our artificial parms are implicitly `const'; they cannot be
assigned to. */
TREE_READONLY (parm) = 1;
return parm;
}
/* Constructors for types with virtual baseclasses need an "in-charge" flag
saying whether this constructor is responsible for initialization of
virtual baseclasses or not. All destructors also need this "in-charge"
flag, which additionally determines whether or not the destructor should
free the memory for the object.
This function adds the "in-charge" flag to member function FN if
appropriate. It is called from grokclassfn and tsubst.
FN must be either a constructor or destructor.
The in-charge flag follows the 'this' parameter, and is followed by the
VTT parm (if any), then the user-written parms. */
/* Add the in-charge (and, when virtual bases exist, the VTT) artificial
   parameter to constructor/destructor FN and rebuild its type. */
void
maybe_retrofit_in_chrg (tree fn)
{
tree basetype, arg_types, parms, parm, fntype;
/* If we've already added the in-charge parameter don't do it again. */
if (DECL_HAS_IN_CHARGE_PARM_P (fn))
return;
/* When processing templates we can't know, in general, whether or
not we're going to have virtual baseclasses. */
if (processing_template_decl)
return;
/* We don't need an in-charge parameter for constructors that don't
have virtual bases. */
if (DECL_CONSTRUCTOR_P (fn)
&& !CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
return;
/* Split off the type/decl of the first (implicit object) parameter;
the new parms are inserted just after it. */
arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
basetype = TREE_TYPE (TREE_VALUE (arg_types));
arg_types = TREE_CHAIN (arg_types);
parms = DECL_CHAIN (DECL_ARGUMENTS (fn));
/* If this is a subobject constructor or destructor, our caller will
pass us a pointer to our VTT. */
if (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
{
parm = build_artificial_parm (fn, vtt_parm_identifier, vtt_parm_type);
/* First add it to DECL_ARGUMENTS between 'this' and the real args... */
DECL_CHAIN (parm) = parms;
parms = parm;
/* ...and then to TYPE_ARG_TYPES. */
arg_types = hash_tree_chain (vtt_parm_type, arg_types);
DECL_HAS_VTT_PARM_P (fn) = 1;
}
/* Then add the in-charge parm (before the VTT parm). */
parm = build_artificial_parm (fn, in_charge_identifier, integer_type_node);
DECL_CHAIN (parm) = parms;
parms = parm;
arg_types = hash_tree_chain (integer_type_node, arg_types);
/* Insert our new parameter(s) into the list. */
DECL_CHAIN (DECL_ARGUMENTS (fn)) = parms;
/* And rebuild the function type. */
fntype = build_method_type_directly (basetype, TREE_TYPE (TREE_TYPE (fn)),
arg_types);
if (TYPE_ATTRIBUTES (TREE_TYPE (fn)))
fntype = (cp_build_type_attribute_variant
(fntype, TYPE_ATTRIBUTES (TREE_TYPE (fn))));
fntype = cxx_copy_lang_qualifiers (fntype, TREE_TYPE (fn));
TREE_TYPE (fn) = fntype;
/* Now we've got the in-charge parameter. */
DECL_HAS_IN_CHARGE_PARM_P (fn) = 1;
}
/* Classes overload their constituent function names automatically.
When a function name is declared in a record structure,
its name is changed to its overloaded name. Since names for
constructors and destructors can conflict, we place a leading
'$' for destructors.
CNAME is the name of the class we are grokking for.
FUNCTION is a FUNCTION_DECL. It was created by `grokdeclarator'.
FLAGS contains bits saying what's special about today's
arguments. DTOR_FLAG == DESTRUCTOR.
If FUNCTION is a destructor, then we must add the `auto-delete' field
as a second parameter. There is some hair associated with the fact
that we must "declare" this variable in the manner consistent with the
way the rest of the arguments were declared.
QUALS are the qualifiers for the this pointer. */
/* Finish processing member function FUNCTION of class CTYPE: force C++
   linkage, set its context, and retrofit the in-charge parameter for
   constructors and destructors. */
void
grokclassfn (tree ctype, tree function, enum overload_flags flags)
{
tree fn_name = DECL_NAME (function);
/* Even within an `extern "C"' block, members get C++ linkage. See
[dcl.link] for details. */
SET_DECL_LANGUAGE (function, lang_cplusplus);
/* Recover from a missing name so processing can continue. */
if (fn_name == NULL_TREE)
{
error ("name missing for member function");
fn_name = get_identifier ("<anonymous>");
DECL_NAME (function) = fn_name;
}
DECL_CONTEXT (function) = ctype;
if (flags == DTOR_FLAG)
DECL_CXX_DESTRUCTOR_P (function) = 1;
if (flags == DTOR_FLAG || DECL_CONSTRUCTOR_P (function))
maybe_retrofit_in_chrg (function);
}
/* Create an ARRAY_REF, checking for the user doing things backwards
along the way. DECLTYPE_P is for N3276, as in the parser. */
/* Build the subscript expression ARRAY_EXPR[INDEX_EXP] at LOC, handling
   operator[], the commutative `i[a]' form, and template-dependent
   operands.  Returns error_mark_node on failure. */
tree
grok_array_decl (location_t loc, tree array_expr, tree index_exp,
bool decltype_p)
{
tree type;
tree expr;
tree orig_array_expr = array_expr;
tree orig_index_exp = index_exp;
tree overload = NULL_TREE;
if (error_operand_p (array_expr) || error_operand_p (index_exp))
return error_mark_node;
if (processing_template_decl)
{
/* Type-dependent operands: defer by building a bare ARRAY_REF. */
if (type_dependent_expression_p (array_expr)
|| type_dependent_expression_p (index_exp))
return build_min_nt_loc (loc, ARRAY_REF, array_expr, index_exp,
NULL_TREE, NULL_TREE);
array_expr = build_non_dependent_expr (array_expr);
index_exp = build_non_dependent_expr (index_exp);
}
type = TREE_TYPE (array_expr);
gcc_assert (type);
type = non_reference (type);
/* If they have an `operator[]', use that. */
if (MAYBE_CLASS_TYPE_P (type) || MAYBE_CLASS_TYPE_P (TREE_TYPE (index_exp)))
{
tsubst_flags_t complain = tf_warning_or_error;
if (decltype_p)
complain |= tf_decltype;
expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL, array_expr,
index_exp, NULL_TREE, &overload, complain);
}
else
{
tree p1, p2, i1, i2;
bool swapped = false;
/* Otherwise, create an ARRAY_REF for a pointer or array type.
It is a little-known fact that, if `a' is an array and `i' is
an int, you can write `i[a]', which means the same thing as
`a[i]'. */
/* p1/p2: each operand converted to a pointer (or taken as-is for
array/vector types); i1/i2: each converted to an integer/enum. */
if (TREE_CODE (type) == ARRAY_TYPE || VECTOR_TYPE_P (type))
p1 = array_expr;
else
p1 = build_expr_type_conversion (WANT_POINTER, array_expr, false);
if (TREE_CODE (TREE_TYPE (index_exp)) == ARRAY_TYPE)
p2 = index_exp;
else
p2 = build_expr_type_conversion (WANT_POINTER, index_exp, false);
i1 = build_expr_type_conversion (WANT_INT | WANT_ENUM, array_expr,
false);
i2 = build_expr_type_conversion (WANT_INT | WANT_ENUM, index_exp,
false);
/* Both orientations viable -> the subscript is ambiguous. */
if ((p1 && i2) && (i1 && p2))
error ("ambiguous conversion for array subscript");
if (p1 && i2)
array_expr = p1, index_exp = i2;
else if (i1 && p2)
swapped = true, array_expr = p2, index_exp = i1;
else
{
error_at (loc, "invalid types %<%T[%T]%> for array subscript",
type, TREE_TYPE (index_exp));
return error_mark_node;
}
if (array_expr == error_mark_node || index_exp == error_mark_node)
error ("ambiguous conversion for array subscript");
if (TYPE_PTR_P (TREE_TYPE (array_expr)))
array_expr = mark_rvalue_use (array_expr);
else
array_expr = mark_lvalue_use_nonread (array_expr);
index_exp = mark_rvalue_use (index_exp);
/* Keep the user's written operand order when evaluation order is
strict (P0145) and either side has side effects. */
if (swapped
&& flag_strong_eval_order == 2
&& (TREE_SIDE_EFFECTS (array_expr) || TREE_SIDE_EFFECTS (index_exp)))
expr = build_array_ref (input_location, index_exp, array_expr);
else
expr = build_array_ref (input_location, array_expr, index_exp);
}
if (processing_template_decl && expr != error_mark_node)
{
/* Rebuild a dependent-safe form over the ORIGINAL operands. */
if (overload != NULL_TREE)
return (build_min_non_dep_op_overload
(ARRAY_REF, expr, overload, orig_array_expr, orig_index_exp));
return build_min_non_dep (ARRAY_REF, expr, orig_array_expr, orig_index_exp,
NULL_TREE, NULL_TREE);
}
return expr;
}
/* Given the cast expression EXP, checking out its validity. Either return
an error_mark_node if there was an unavoidable error, return a cast to
void for trying to delete a pointer w/ the value 0, or return the
call to delete. If DOING_VEC is true, we handle things differently
for doing an array delete.
Implements ARM $5.3.4. This is called from the parser. */
/* Validate and build a delete-expression for EXP at LOC; returns
   error_mark_node on hard errors, a cast to void for deleting a null
   pointer, or the built delete call. */
tree
delete_sanity (location_t loc, tree exp, tree size, bool doing_vec,
int use_global_delete, tsubst_flags_t complain)
{
tree t, type;
if (exp == error_mark_node)
return exp;
if (processing_template_decl)
{
/* In a template, just record the delete for later substitution. */
t = build_min (DELETE_EXPR, void_type_node, exp, size);
DELETE_EXPR_USE_GLOBAL (t) = use_global_delete;
DELETE_EXPR_USE_VEC (t) = doing_vec;
TREE_SIDE_EFFECTS (t) = 1;
SET_EXPR_LOCATION (t, loc);
return t;
}
location_t exp_loc = cp_expr_loc_or_loc (exp, loc);
/* An array can't have been allocated by new, so complain. */
if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
&& (complain & tf_warning))
warning_at (exp_loc, 0, "deleting array %q#E", exp);
/* The operand must be convertible to a pointer. */
t = build_expr_type_conversion (WANT_POINTER, exp, true);
if (t == NULL_TREE || t == error_mark_node)
{
if (complain & tf_error)
error_at (exp_loc,
"type %q#T argument given to %<delete%>, expected pointer",
TREE_TYPE (exp));
return error_mark_node;
}
type = TREE_TYPE (t);
/* As of Valley Forge, you can delete a pointer to const. */
/* You can't delete functions. */
if (TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
{
if (complain & tf_error)
error_at (exp_loc,
"cannot delete a function. Only pointer-to-objects are "
"valid arguments to %<delete%>");
return error_mark_node;
}
/* Deleting ptr to void is undefined behavior [expr.delete/3]. */
if (VOID_TYPE_P (TREE_TYPE (type)))
{
if (complain & tf_warning)
warning_at (exp_loc, OPT_Wdelete_incomplete,
"deleting %qT is undefined", type);
doing_vec = 0;
}
/* Deleting a pointer with the value zero is valid and has no effect. */
if (integer_zerop (t))
return build1_loc (loc, NOP_EXPR, void_type_node, t);
if (doing_vec)
return build_vec_delete (loc, t, /*maxindex=*/NULL_TREE,
sfk_deleting_destructor,
use_global_delete, complain);
else
return build_delete (loc, type, t, sfk_deleting_destructor,
LOOKUP_NORMAL, use_global_delete,
complain);
}
/* Report an error if the indicated template declaration is not the
sort of thing that should be a member template. */
/* Diagnose TMPL if it is not a valid kind of member template; marks
   valid function/class member templates as ignored for debug output. */
void
check_member_template (tree tmpl)
{
tree decl;
gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
decl = DECL_TEMPLATE_RESULT (tmpl);
/* Function templates, alias templates, and class-type member
templates are acceptable here. */
if (TREE_CODE (decl) == FUNCTION_DECL
|| DECL_ALIAS_TEMPLATE_P (tmpl)
|| (TREE_CODE (decl) == TYPE_DECL
&& MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))))
{
/* The parser rejects template declarations in local classes
(with the exception of generic lambdas). */
gcc_assert (!current_function_decl || LAMBDA_FUNCTION_P (decl));
/* The parser rejects any use of virtual in a function template. */
gcc_assert (!(TREE_CODE (decl) == FUNCTION_DECL
&& DECL_VIRTUAL_P (decl)));
/* The debug-information generating code doesn't know what to do
with member templates. */
DECL_IGNORED_P (tmpl) = 1;
}
else if (variable_template_p (tmpl))
/* OK */;
else
error ("template declaration of %q#D", decl);
}
/* Sanity check: report error if this function FUNCTION is not
really a member of the class (CTYPE) it is supposed to belong to.
TEMPLATE_PARMS is used to specify the template parameters of a member
template passed as FUNCTION_DECL. If the member template is passed as a
TEMPLATE_DECL, it can be NULL since the parameters can be extracted
from the declaration. If the function is not a function template, it
must be NULL.
It returns the original declaration for the function, NULL_TREE if
no declaration was found, error_mark_node if an error was emitted. */
/* Verify that FUNCTION really declares a member of CTYPE; returns the
   matching in-class declaration, NULL_TREE when none should be found,
   or error_mark_node after diagnosing a mismatch. */
tree
check_classfn (tree ctype, tree function, tree template_parms)
{
if (DECL_USE_TEMPLATE (function)
&& !(TREE_CODE (function) == TEMPLATE_DECL
&& DECL_TEMPLATE_SPECIALIZATION (function))
&& DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (function)))
/* Since this is a specialization of a member template,
we're not going to find the declaration in the class.
For example, in:
struct S { template <typename T> void f(T); };
template <> void S::f(int);
we're not going to find `S::f(int)', but there's no
reason we should, either. We let our callers know we didn't
find the method, but we don't complain. */
return NULL_TREE;
/* Basic sanity check: for a template function, the template parameters
either were not passed, or they are the same as DECL_TEMPLATE_PARMS. */
if (TREE_CODE (function) == TEMPLATE_DECL)
{
if (template_parms
&& !comp_template_parms (template_parms,
DECL_TEMPLATE_PARMS (function)))
{
error ("template parameter lists provided don%'t match the "
"template parameters of %qD", function);
return error_mark_node;
}
template_parms = DECL_TEMPLATE_PARMS (function);
}
/* OK, is this a definition of a member template? */
bool is_template = (template_parms != NULL_TREE);
/* [temp.mem]
A destructor shall not be a member template. */
if (DECL_DESTRUCTOR_P (function) && is_template)
{
error ("destructor %qD declared as member template", function);
return error_mark_node;
}
/* We must enter the scope here, because conversion operators are
named by target type, and type equivalence relies on typenames
resolving within the scope of CTYPE. */
tree pushed_scope = push_scope (ctype);
tree matched = NULL_TREE;
tree fns = get_class_binding (ctype, DECL_NAME (function));
/* Scan every same-named member until one matches. */
for (ovl_iterator iter (fns); !matched && iter; ++iter)
{
tree fndecl = *iter;
/* A member template definition only matches a member template
declaration. */
if (is_template != (TREE_CODE (fndecl) == TEMPLATE_DECL))
continue;
if (!DECL_DECLARES_FUNCTION_P (fndecl))
continue;
tree p1 = TYPE_ARG_TYPES (TREE_TYPE (function));
tree p2 = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
/* We cannot simply call decls_match because this doesn't work
for static member functions that are pretending to be
methods, and because the name may have been changed by
asm("new_name"). */
/* Get rid of the this parameter on functions that become
static. */
if (DECL_STATIC_FUNCTION_P (fndecl)
&& TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE)
p1 = TREE_CHAIN (p1);
/* ref-qualifier or absence of same must match. */
if (type_memfn_rqual (TREE_TYPE (function))
!= type_memfn_rqual (TREE_TYPE (fndecl)))
continue;
// Include constraints in the match.
tree c1 = get_constraints (function);
tree c2 = get_constraints (fndecl);
/* While finding a match, same types and params are not enough
if the function is versioned. Also check version ("target")
attributes. */
if (same_type_p (TREE_TYPE (TREE_TYPE (function)),
TREE_TYPE (TREE_TYPE (fndecl)))
&& compparms (p1, p2)
&& !targetm.target_option.function_versions (function, fndecl)
&& (!is_template
|| comp_template_parms (template_parms,
DECL_TEMPLATE_PARMS (fndecl)))
&& equivalent_constraints (c1, c2)
&& (DECL_TEMPLATE_SPECIALIZATION (function)
== DECL_TEMPLATE_SPECIALIZATION (fndecl))
&& (!DECL_TEMPLATE_SPECIALIZATION (function)
|| (DECL_TI_TEMPLATE (function) == DECL_TI_TEMPLATE (fndecl))))
matched = fndecl;
}
if (!matched)
{
/* No match: emit the most helpful diagnostic available, listing
candidates when we have any. */
if (!COMPLETE_TYPE_P (ctype))
cxx_incomplete_type_error (DECL_SOURCE_LOCATION (function),
function, ctype);
else
{
if (DECL_CONV_FN_P (function))
fns = get_class_binding (ctype, conv_op_identifier);
error_at (DECL_SOURCE_LOCATION (function),
"no declaration matches %q#D", function);
if (fns)
print_candidates (fns);
else if (DECL_CONV_FN_P (function))
inform (DECL_SOURCE_LOCATION (function),
"no conversion operators declared");
else
inform (DECL_SOURCE_LOCATION (function),
"no functions named %qD", function);
inform (DECL_SOURCE_LOCATION (TYPE_NAME (ctype)),
"%#qT defined here", ctype);
}
matched = error_mark_node;
}
if (pushed_scope)
pop_scope (pushed_scope);
return matched;
}
/* DECL is a function with vague linkage. Remember it so that at the
end of the translation unit we can decide whether or not to emit
it. */
/* Record DECL (a vague-linkage function) so end-of-TU processing can
   decide whether to emit it; no-op inside templates. */
void
note_vague_linkage_fn (tree decl)
{
if (processing_template_decl)
return;
DECL_DEFER_OUTPUT (decl) = 1;
vec_safe_push (deferred_fns, decl);
}
/* As above, but for variable template instantiations. */
/* Queue DECL, a variable template instantiation, on pending_statics for
   end-of-TU handling. */
void
note_variable_template_instantiation (tree decl)
{
vec_safe_push (pending_statics, decl);
}
/* We have just processed the DECL, which is a static data member.
The other parameters are as for cp_finish_decl. */
/* Complete processing of static data member DECL: queue it for output,
   diagnose invalid contexts, then hand off to cp_finish_decl. */
void
finish_static_data_member_decl (tree decl,
tree init, bool init_const_expr_p,
tree asmspec_tree,
int flags)
{
if (DECL_TEMPLATE_INSTANTIATED (decl))
/* We already needed to instantiate this, so the processing in this
function is unnecessary/wrong. */
return;
DECL_CONTEXT (decl) = current_class_type;
/* We cannot call pushdecl here, because that would fill in the
TREE_CHAIN of our decl. Instead, we modify cp_finish_decl to do
the right thing, namely, to put this decl out straight away. */
if (! processing_template_decl)
vec_safe_push (pending_statics, decl);
/* Static data members are invalid in local and unnamed classes. */
if (LOCAL_CLASS_P (current_class_type)
/* We already complained about the template definition. */
&& !DECL_TEMPLATE_INSTANTIATION (decl))
permerror (DECL_SOURCE_LOCATION (decl),
"local class %q#T shall not have static data member %q#D",
current_class_type, decl);
else
/* Walk outward through enclosing types looking for an unnamed one. */
for (tree t = current_class_type; TYPE_P (t);
t = CP_TYPE_CONTEXT (t))
if (TYPE_UNNAMED_P (t))
{
auto_diagnostic_group d;
if (permerror (DECL_SOURCE_LOCATION (decl),
"static data member %qD in unnamed class", decl))
inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)),
"unnamed class defined here");
break;
}
if (DECL_INLINE_VAR_P (decl) && !DECL_TEMPLATE_INSTANTIATION (decl))
/* An inline variable is immediately defined, so don't set DECL_IN_AGGR_P.
Except that if decl is a template instantiation, it isn't defined until
instantiate_decl. */;
else
DECL_IN_AGGR_P (decl) = 1;
/* Remember arrays declared without a bound, e.g. `static int a[];'. */
if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
&& TYPE_DOMAIN (TREE_TYPE (decl)) == NULL_TREE)
SET_VAR_HAD_UNKNOWN_BOUND (decl);
if (init)
{
/* Similarly to start_decl_1, we want to complete the type in order
to do the right thing in cp_apply_type_quals_to_decl, possibly
clear TYPE_QUAL_CONST (c++/65579). */
tree type = TREE_TYPE (decl) = complete_type (TREE_TYPE (decl));
cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
}
cp_finish_decl (decl, init, init_const_expr_p, asmspec_tree, flags);
}
/* DECLARATOR and DECLSPECS correspond to a class member.  The other
   parameters are as for cp_finish_decl.  Return the DECL for the
   class member declared.  */

tree
grokfield (const cp_declarator *declarator,
	   cp_decl_specifier_seq *declspecs,
	   tree init, bool init_const_expr_p,
	   tree asmspec_tree,
	   tree attrlist)
{
  tree value;
  const char *asmspec = 0;
  int flags;

  /* An initializer list consisting of a lone error_mark_node means the
     initializer itself was erroneous; treat the member as uninitialized.  */
  if (init
      && TREE_CODE (init) == TREE_LIST
      && TREE_VALUE (init) == error_mark_node
      && TREE_CHAIN (init) == NULL_TREE)
    init = NULL_TREE;

  /* Classify the initializer for grokdeclarator: "= delete",
     "= default", a real initializer, or none.  */
  int initialized;
  if (init == ridpointers[(int)RID_DELETE])
    initialized = SD_DELETED;
  else if (init == ridpointers[(int)RID_DEFAULT])
    initialized = SD_DEFAULTED;
  else if (init)
    initialized = SD_INITIALIZED;
  else
    initialized = SD_UNINITIALIZED;

  value = grokdeclarator (declarator, declspecs, FIELD, initialized, &attrlist);
  if (! value || value == error_mark_node)
    /* friend or constructor went bad.  */
    return error_mark_node;
  if (TREE_TYPE (value) == error_mark_node)
    return value;

  /* A typedef cannot be initialized; diagnose and drop the initializer.  */
  if (TREE_CODE (value) == TYPE_DECL && init)
    {
      error_at (cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (value)),
		"typedef %qD is initialized (use %qs instead)",
		value, "decltype");
      init = NULL_TREE;
    }

  /* Pass friendly classes back.  */
  if (value == void_type_node)
    return value;

  if (DECL_NAME (value)
      && TREE_CODE (DECL_NAME (value)) == TEMPLATE_ID_EXPR)
    {
      error_at (declarator->id_loc,
		"explicit template argument list not allowed");
      return error_mark_node;
    }

  /* Stash away type declarations.  */
  if (TREE_CODE (value) == TYPE_DECL)
    {
      DECL_NONLOCAL (value) = 1;
      DECL_CONTEXT (value) = current_class_type;

      if (attrlist)
	{
	  int attrflags = 0;

	  /* If this is a typedef that names the class for linkage purposes
	     (7.1.3p8), apply any attributes directly to the type.  */
	  if (OVERLOAD_TYPE_P (TREE_TYPE (value))
	      && value == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value))))
	    attrflags = ATTR_FLAG_TYPE_IN_PLACE;

	  cplus_decl_attributes (&value, attrlist, attrflags);
	}

      if (decl_spec_seq_has_spec_p (declspecs, ds_typedef)
	  && TREE_TYPE (value) != error_mark_node
	  && TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value))) != value)
	set_underlying_type (value);

      /* It's important that push_template_decl below follows
	 set_underlying_type above so that the created template
	 carries the properly set type of VALUE.  */
      if (processing_template_decl)
	value = push_template_decl (value);

      record_locally_defined_typedef (value);
      return value;
    }

  int friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend);

  if (!friendp && DECL_IN_AGGR_P (value))
    {
      error ("%qD is already defined in %qT", value, DECL_CONTEXT (value));
      return void_type_node;
    }

  if (asmspec_tree && asmspec_tree != error_mark_node)
    asmspec = TREE_STRING_POINTER (asmspec_tree);

  if (init)
    {
      if (TREE_CODE (value) == FUNCTION_DECL)
	{
	  /* "= delete" and "= default" arrive here as the raw RID
	     identifiers; anything else on a function is an error.  */
	  if (init == ridpointers[(int)RID_DELETE])
	    {
	      DECL_DELETED_FN (value) = 1;
	      DECL_DECLARED_INLINE_P (value) = 1;
	    }
	  else if (init == ridpointers[(int)RID_DEFAULT])
	    {
	      if (defaultable_fn_check (value))
		{
		  DECL_DEFAULTED_FN (value) = 1;
		  DECL_INITIALIZED_IN_CLASS_P (value) = 1;
		  DECL_DECLARED_INLINE_P (value) = 1;
		  /* grokfndecl set this to error_mark_node, but we want to
		     leave it unset until synthesize_method.  */
		  DECL_INITIAL (value) = NULL_TREE;
		}
	    }
	  else if (TREE_CODE (init) == DEFERRED_PARSE)
	    error ("invalid initializer for member function %qD", value);
	  else if (TREE_CODE (TREE_TYPE (value)) == METHOD_TYPE)
	    {
	      /* "= 0" marks a pure virtual function.  */
	      if (integer_zerop (init))
		DECL_PURE_VIRTUAL_P (value) = 1;
	      else if (error_operand_p (init))
		; /* An error has already been reported.  */
	      else
		error ("invalid initializer for member function %qD",
		       value);
	    }
	  else
	    {
	      gcc_assert (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE);
	      location_t iloc
		= cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (value));
	      if (friendp)
		error_at (iloc, "initializer specified for friend "
			  "function %qD", value);
	      else
		error_at (iloc, "initializer specified for static "
			  "member function %qD", value);
	    }
	}
      else if (TREE_CODE (value) == FIELD_DECL)
	/* C++11 NSDMI, keep going.  */;
      else if (!VAR_P (value))
	gcc_unreachable ();
    }

  /* Pass friend decls back.  */
  if ((TREE_CODE (value) == FUNCTION_DECL
       || TREE_CODE (value) == TEMPLATE_DECL)
      && DECL_CONTEXT (value) != current_class_type)
    return value;

  /* Need to set this before push_template_decl.  */
  if (VAR_P (value))
    DECL_CONTEXT (value) = current_class_type;

  if (processing_template_decl && VAR_OR_FUNCTION_DECL_P (value))
    {
      value = push_template_decl (value);
      if (error_operand_p (value))
	return error_mark_node;
    }

  if (attrlist)
    cplus_decl_attributes (&value, attrlist, 0);

  /* Direct-list-initialization ("{...}") takes the normal lookup path;
     everything else is an implicit conversion context.  */
  if (init && DIRECT_LIST_INIT_P (init))
    flags = LOOKUP_NORMAL;
  else
    flags = LOOKUP_IMPLICIT;

  switch (TREE_CODE (value))
    {
    case VAR_DECL:
      finish_static_data_member_decl (value, init, init_const_expr_p,
				      asmspec_tree, flags);
      return value;

    case FIELD_DECL:
      if (asmspec)
	error ("%<asm%> specifiers are not permitted on non-static data members");
      if (DECL_INITIAL (value) == error_mark_node)
	init = error_mark_node;
      cp_finish_decl (value, init, /*init_const_expr_p=*/false,
		      NULL_TREE, flags);
      DECL_IN_AGGR_P (value) = 1;
      return value;

    case FUNCTION_DECL:
      if (asmspec)
	set_user_assembler_name (value, asmspec);

      cp_finish_decl (value,
		      /*init=*/NULL_TREE,
		      /*init_const_expr_p=*/false,
		      asmspec_tree, flags);

      /* Pass friends back this way.  */
      if (DECL_UNIQUE_FRIEND_P (value))
	return void_type_node;

      DECL_IN_AGGR_P (value) = 1;
      return value;

    default:
      gcc_unreachable ();
    }
  /* Not reached; all cases above return.  */
  return NULL_TREE;
}
/* Like `grokfield', but for bitfields.
   WIDTH is the width of the bitfield, a constant expression.
   The other parameters are as for grokfield.  */

tree
grokbitfield (const cp_declarator *declarator,
	      cp_decl_specifier_seq *declspecs, tree width, tree init,
	      tree attrlist)
{
  tree value = grokdeclarator (declarator, declspecs, BITFIELD,
			       init != NULL_TREE, &attrlist);

  if (value == error_mark_node)
    return NULL_TREE; /* friends went bad.  */

  tree type = TREE_TYPE (value);
  if (type == error_mark_node)
    return value;

  /* Pass friendly classes back.  */
  if (VOID_TYPE_P (value))
    return void_type_node;

  /* A bit-field must have integral or enumeration type; a dependent
     non-pointer type may still become one at instantiation time.  */
  if (!INTEGRAL_OR_ENUMERATION_TYPE_P (type)
      && (INDIRECT_TYPE_P (type) || !dependent_type_p (type)))
    {
      error_at (DECL_SOURCE_LOCATION (value),
		"bit-field %qD with non-integral type %qT",
		value, type);
      return error_mark_node;
    }

  if (TREE_CODE (value) == TYPE_DECL)
    {
      error_at (DECL_SOURCE_LOCATION (value),
		"cannot declare %qD to be a bit-field type", value);
      return NULL_TREE;
    }

  /* Usually, finish_struct_1 catches bitfields with invalid types.
     But, in the case of bitfields with function type, we confuse
     ourselves into thinking they are member functions, so we must
     check here.  */
  if (TREE_CODE (value) == FUNCTION_DECL)
    {
      error_at (DECL_SOURCE_LOCATION (value),
		"cannot declare bit-field %qD with function type", value);
      return NULL_TREE;
    }

  if (TYPE_WARN_IF_NOT_ALIGN (type))
    {
      error_at (DECL_SOURCE_LOCATION (value), "cannot declare bit-field "
		"%qD with %<warn_if_not_aligned%> type", value);
      return NULL_TREE;
    }

  if (DECL_IN_AGGR_P (value))
    {
      error ("%qD is already defined in the class %qT", value,
	     DECL_CONTEXT (value));
      return void_type_node;
    }

  if (TREE_STATIC (value))
    {
      error_at (DECL_SOURCE_LOCATION (value),
		"static member %qD cannot be a bit-field", value);
      return NULL_TREE;
    }

  /* Brace-initialization takes the normal lookup path.  */
  int flags = LOOKUP_IMPLICIT;
  if (init && DIRECT_LIST_INIT_P (init))
    flags = LOOKUP_NORMAL;
  cp_finish_decl (value, init, false, NULL_TREE, flags);

  if (width != error_mark_node)
    {
      /* The width must be an integer type.  */
      if (!type_dependent_expression_p (width)
	  && !INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (width)))
	error ("width of bit-field %qD has non-integral type %qT", value,
	       TREE_TYPE (width));
      else
	{
	  /* Temporarily stash the width in DECL_BIT_FIELD_REPRESENTATIVE.
	     check_bitfield_decl picks it from there later and sets DECL_SIZE
	     accordingly.  */
	  DECL_BIT_FIELD_REPRESENTATIVE (value) = width;
	  SET_DECL_C_BIT_FIELD (value);
	}
    }

  DECL_IN_AGGR_P (value) = 1;

  if (attrlist)
    cplus_decl_attributes (&value, attrlist, /*flags=*/0);

  return value;
}
/* Returns true iff ATTR is an attribute which needs to be applied at
   instantiation time rather than template definition time.  The order of
   the checks below matters: specific attribute names are special-cased
   before the generic dependent-argument and dependent-type tests.  */

static bool
is_late_template_attribute (tree attr, tree decl)
{
  tree name = get_attribute_name (attr);
  tree args = TREE_VALUE (attr);
  const struct attribute_spec *spec = lookup_attribute_spec (name);
  tree arg;

  if (!spec)
    /* Unknown attribute.  */
    return false;

  /* Attribute weak handling wants to write out assembly right away.  */
  if (is_attribute_p ("weak", name))
    return true;

  /* Attributes used and unused are applied directly to typedefs for the
     benefit of maybe_warn_unused_local_typedefs.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && (is_attribute_p ("unused", name)
	  || is_attribute_p ("used", name)))
    return false;

  /* Attribute tls_model wants to modify the symtab.  */
  if (is_attribute_p ("tls_model", name))
    return true;

  /* #pragma omp declare simd attribute needs to be always deferred.  */
  if (flag_openmp
      && is_attribute_p ("omp declare simd", name))
    return true;

  if (args == error_mark_node)
    return false;

  /* An attribute pack expansion is clearly dependent.  */
  if (args && PACK_EXPANSION_P (args))
    return true;

  /* If any of the arguments are dependent expressions, we can't evaluate
     the attribute until instantiation time.  */
  for (arg = args; arg; arg = TREE_CHAIN (arg))
    {
      tree t = TREE_VALUE (arg);

      /* If the first attribute argument is an identifier, only consider
	 second and following arguments.  Attributes like mode, format,
	 cleanup and several target specific attributes aren't late
	 just because they have an IDENTIFIER_NODE as first argument.  */
      if (arg == args && attribute_takes_identifier_p (name)
	  && identifier_p (t))
	continue;

      if (value_dependent_expression_p (t))
	return true;
    }

  if (TREE_CODE (decl) == TYPE_DECL
      || TYPE_P (decl)
      || spec->type_required)
    {
      tree type = TYPE_P (decl) ? decl : TREE_TYPE (decl);

      /* We can't apply any attributes to a completely unknown type until
	 instantiation time.  */
      enum tree_code code = TREE_CODE (type);
      if (code == TEMPLATE_TYPE_PARM
	  || code == BOUND_TEMPLATE_TEMPLATE_PARM
	  || code == TYPENAME_TYPE)
	return true;
      /* Also defer most attributes on dependent types.  This is not
	 necessary in all cases, but is the better default.  */
      else if (dependent_type_p (type)
	       /* But some attributes specifically apply to templates.  */
	       && !is_attribute_p ("abi_tag", name)
	       && !is_attribute_p ("deprecated", name)
	       && !is_attribute_p ("visibility", name))
	return true;
      else
	return false;
    }
  else
    return false;
}
/* ATTR_P is a list of attributes.  Remove any attributes which need to be
   applied at instantiation time and return them.  If IS_DEPENDENT is true,
   the declaration itself is dependent, so all attributes should be applied
   at instantiation time.  */

tree
splice_template_attributes (tree *attr_p, tree decl)
{
  if (!attr_p)
    return NULL_TREE;

  tree late_attrs = NULL_TREE;
  /* Where the next spliced-out attribute should be appended.  */
  tree *tail = &late_attrs;
  tree *scan = attr_p;

  while (*scan)
    {
      tree attr = *scan;
      if (is_late_template_attribute (attr, decl))
	{
	  ATTR_IS_DEPENDENT (attr) = 1;
	  /* Unlink ATTR from the input list and append it to LATE_ATTRS.  */
	  *scan = TREE_CHAIN (attr);
	  TREE_CHAIN (attr) = NULL_TREE;
	  *tail = attr;
	  tail = &TREE_CHAIN (attr);
	}
      else
	scan = &TREE_CHAIN (*scan);
    }

  return late_attrs;
}
/* Remove any late attributes from the list in ATTR_P and attach them to
   DECL_P.  For a type, this may create a new attribute variant; for a
   main-variant type modified in place, all other variants are updated
   to share the new attribute list.  */

static void
save_template_attributes (tree *attr_p, tree *decl_p, int flags)
{
  tree *q;

  if (attr_p && *attr_p == error_mark_node)
    return;

  tree late_attrs = splice_template_attributes (attr_p, *decl_p);
  if (!late_attrs)
    return;

  /* Pick the attribute list that belongs to the entity: DECL_ATTRIBUTES
     for declarations, TYPE_ATTRIBUTES for bare types.  */
  if (DECL_P (*decl_p))
    q = &DECL_ATTRIBUTES (*decl_p);
  else
    q = &TYPE_ATTRIBUTES (*decl_p);

  tree old_attrs = *q;

  /* Merge the late attributes at the beginning with the attribute
     list.  */
  late_attrs = merge_attributes (late_attrs, *q);
  if (*q != late_attrs
      && !DECL_P (*decl_p)
      && !(flags & ATTR_FLAG_TYPE_IN_PLACE))
    {
      /* Build a distinct variant so other users of the type are
	 unaffected.  */
      if (!dependent_type_p (*decl_p))
	*decl_p = cp_build_type_attribute_variant (*decl_p, late_attrs);
      else
	{
	  *decl_p = build_variant_type_copy (*decl_p);
	  TYPE_ATTRIBUTES (*decl_p) = late_attrs;
	}
    }
  else
    *q = late_attrs;

  if (!DECL_P (*decl_p) && *decl_p == TYPE_MAIN_VARIANT (*decl_p))
    {
      /* We've added new attributes directly to the main variant, so
	 now we need to update all of the other variants to include
	 these new attributes.  */
      tree variant;
      for (variant = TYPE_NEXT_VARIANT (*decl_p); variant;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  gcc_assert (TYPE_ATTRIBUTES (variant) == old_attrs);
	  TYPE_ATTRIBUTES (variant) = TYPE_ATTRIBUTES (*decl_p);
	}
    }
}
/* True if ATTRS contains any dependent attributes that affect type
   identity.  */

bool
any_dependent_type_attributes_p (tree attrs)
{
  tree a;
  for (a = attrs; a; a = TREE_CHAIN (a))
    {
      if (!ATTR_IS_DEPENDENT (a))
	continue;
      const attribute_spec *spec = lookup_attribute_spec (TREE_PURPOSE (a));
      if (spec && spec->affects_type_identity)
	return true;
    }
  return false;
}
/* Return true iff ATTRS are acceptable attributes to be applied in-place
   to a typedef which gives a previously unnamed class or enum a name for
   linkage purposes.  */

bool
attributes_naming_typedef_ok (tree attrs)
{
  /* vector_size changes the type identity, so it cannot be applied to a
     linkage-naming typedef in place.  */
  for (tree a = attrs; a; a = TREE_CHAIN (a))
    if (is_attribute_p ("vector_size", get_attribute_name (a)))
      return false;
  return true;
}
/* Like reconstruct_complex_type, but handle also template trees.
   Recursively rebuilds TYPE with its innermost scalar replaced by
   BOTTOM, re-applying attributes, cv-qualifiers and language-specific
   qualifiers at each level.  */

tree
cp_reconstruct_complex_type (tree type, tree bottom)
{
  tree inner, outer;

  if (TYPE_PTR_P (type))
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_pointer_type_for_mode (inner, TYPE_MODE (type),
					   TYPE_REF_CAN_ALIAS_ALL (type));
    }
  else if (TYPE_REF_P (type))
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_reference_type_for_mode (inner, TYPE_MODE (type),
					     TYPE_REF_CAN_ALIAS_ALL (type));
    }
  else if (TREE_CODE (type) == ARRAY_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_cplus_array_type (inner, TYPE_DOMAIN (type));
      /* Don't call cp_build_qualified_type on ARRAY_TYPEs, the
	 element type qualification will be handled by the recursive
	 cp_reconstruct_complex_type call and cp_build_qualified_type
	 for ARRAY_TYPEs changes the element type.  */
      return outer;
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_function_type (inner, TYPE_ARG_TYPES (type));
      outer = apply_memfn_quals (outer, type_memfn_quals (type));
    }
  else if (TREE_CODE (type) == METHOD_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      /* The build_method_type_directly() routine prepends 'this' to argument list,
	 so we must compensate by getting rid of it.  */
      outer
	= build_method_type_directly
	    (class_of_this_parm (type), inner,
	     TREE_CHAIN (TYPE_ARG_TYPES (type)));
    }
  else if (TREE_CODE (type) == OFFSET_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_offset_type (TYPE_OFFSET_BASETYPE (type), inner);
    }
  else
    /* Recursion terminates at the innermost non-compound type.  */
    return bottom;

  if (TYPE_ATTRIBUTES (type))
    outer = cp_build_type_attribute_variant (outer, TYPE_ATTRIBUTES (type));
  outer = cp_build_qualified_type (outer, cp_type_quals (type));
  outer = cxx_copy_lang_qualifiers (outer, type);

  return outer;
}
/* Replaces any constexpr expression that may be into the attributes
   arguments with their reduced value.  */

void
cp_check_const_attributes (tree attributes)
{
  if (attributes == error_mark_node)
    return;

  for (tree attr = attributes; attr; attr = TREE_CHAIN (attr))
    for (tree arg = TREE_VALUE (attr);
	 arg && TREE_CODE (arg) == TREE_LIST;
	 arg = TREE_CHAIN (arg))
      {
	/* Fold each argument that is an expression; leave identifiers
	   and other non-expression operands alone.  */
	tree expr = TREE_VALUE (arg);
	if (EXPR_P (expr))
	  TREE_VALUE (arg) = fold_non_dependent_expr (expr);
      }
}
/* Return true if TYPE is an OpenMP mappable type.
   If NOTES is true, emit a note message for each problem found,
   continuing through all checks so every problem is reported.  */

static bool
cp_omp_mappable_type_1 (tree type, bool notes)
{
  bool result = true;

  /* Mappable type has to be complete.  */
  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      if (notes && type != error_mark_node)
	{
	  tree decl = TYPE_MAIN_DECL (type);
	  inform ((decl ? DECL_SOURCE_LOCATION (decl) : input_location),
		  "incomplete type %qT is not mappable", type);
	}
      result = false;
    }

  /* Arrays have mappable type if the elements have mappable type.  */
  while (TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  /* A mappable type cannot contain virtual members.  */
  if (CLASS_TYPE_P (type) && CLASSTYPE_VTABLES (type))
    {
      if (notes)
	inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		"type %qT with virtual members is not mappable", type);
      result = false;
    }

  /* All data members must be non-static.  */
  if (CLASS_TYPE_P (type))
    {
      tree field;
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	/* A VAR_DECL in TYPE_FIELDS is a static data member.  */
	if (VAR_P (field))
	  {
	    if (notes)
	      inform (DECL_SOURCE_LOCATION (field),
		      "static field %qD is not mappable", field);
	    result = false;
	  }
	/* All fields must have mappable types.  */
	else if (TREE_CODE (field) == FIELD_DECL
		 && !cp_omp_mappable_type_1 (TREE_TYPE (field), notes))
	  result = false;
    }

  return result;
}
/* Return true if TYPE is an OpenMP mappable type.  Pure query; no
   diagnostics are emitted.  */

bool
cp_omp_mappable_type (tree type)
{
  return cp_omp_mappable_type_1 (type, /*notes=*/false);
}
/* Return true if TYPE is an OpenMP mappable type.
   Emit note diagnostics (via inform) for each mappability problem
   otherwise.  */

bool
cp_omp_emit_unmappable_type_notes (tree type)
{
  return cp_omp_mappable_type_1 (type, true);
}
/* Return the last pushed declaration for the symbol DECL or NULL
   when no such declaration exists.  Used to find the previous
   declaration that a redeclaration's attributes should be checked
   against.  */

static tree
find_last_decl (tree decl)
{
  tree last_decl = NULL_TREE;

  if (tree name = DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE)
    {
      /* Look up the declaration in its scope.  For a class member,
	 temporarily enter the class scope first.  */
      tree pushed_scope = NULL_TREE;
      if (tree ctype = DECL_CONTEXT (decl))
	pushed_scope = push_scope (ctype);

      last_decl = lookup_name (name);

      if (pushed_scope)
	pop_scope (pushed_scope);

      /* The declaration may be a member conversion operator
	 or a bunch of overloads (handle the latter below).  */
      if (last_decl && BASELINK_P (last_decl))
	last_decl = BASELINK_FUNCTIONS (last_decl);
    }

  if (!last_decl)
    return NULL_TREE;

  if (DECL_P (last_decl) || TREE_CODE (last_decl) == OVERLOAD)
    {
      /* A set of overloads of the same function.  */
      for (lkp_iterator iter (last_decl); iter; ++iter)
	{
	  if (TREE_CODE (*iter) == OVERLOAD)
	    continue;
	  /* Return the overload whose signature matches DECL's.  */
	  if (decls_match (decl, *iter, /*record_decls=*/false))
	    return *iter;
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Like decl_attributes, but handle C++ complexity: implicit OpenMP
   "declare target" attributes, deferral of dependent attributes in
   templates, constexpr folding of attribute arguments, and
   pointer-to-member-function types.  */

void
cplus_decl_attributes (tree *decl, tree attributes, int flags)
{
  if (*decl == NULL_TREE || *decl == void_type_node
      || *decl == error_mark_node)
    return;

  /* Add implicit "omp declare target" attribute if requested.  */
  if (scope_chain->omp_declare_target_attribute
      && ((VAR_P (*decl)
	   && (TREE_STATIC (*decl) || DECL_EXTERNAL (*decl)))
	  || TREE_CODE (*decl) == FUNCTION_DECL))
    {
      if (VAR_P (*decl)
	  && DECL_CLASS_SCOPE_P (*decl))
	error ("%q+D static data member inside of declare target directive",
	       *decl);
      else if (VAR_P (*decl)
	       && (processing_template_decl
		   || !cp_omp_mappable_type (TREE_TYPE (*decl))))
	/* Mappability can't be decided yet; mark for later checking.  */
	attributes = tree_cons (get_identifier ("omp declare target implicit"),
				NULL_TREE, attributes);
      else
	{
	  attributes = tree_cons (get_identifier ("omp declare target"),
				  NULL_TREE, attributes);
	  attributes = tree_cons (get_identifier ("omp declare target block"),
				  NULL_TREE, attributes);
	}
    }

  if (processing_template_decl)
    {
      if (check_for_bare_parameter_packs (attributes))
	return;

      /* Pull out attributes that must wait until instantiation.  */
      save_template_attributes (&attributes, decl, flags);
    }

  cp_check_const_attributes (attributes);

  if (TREE_CODE (*decl) == TEMPLATE_DECL)
    decl = &DECL_TEMPLATE_RESULT (*decl);

  if (TREE_TYPE (*decl) && TYPE_PTRMEMFUNC_P (TREE_TYPE (*decl)))
    {
      /* Apply function attributes to the underlying function type of a
	 pointer-to-member-function as well.  */
      attributes
	= decl_attributes (decl, attributes, flags | ATTR_FLAG_FUNCTION_NEXT);
      decl_attributes (&TYPE_PTRMEMFUNC_FN_TYPE_RAW (TREE_TYPE (*decl)),
		       attributes, flags);
    }
  else
    {
      tree last_decl = find_last_decl (*decl);
      decl_attributes (decl, attributes, flags, last_decl);
    }

  if (TREE_CODE (*decl) == TYPE_DECL)
    SET_IDENTIFIER_TYPE_VALUE (DECL_NAME (*decl), TREE_TYPE (*decl));

  /* Propagate deprecation out to the template.  */
  if (TREE_DEPRECATED (*decl))
    if (tree ti = get_template_info (*decl))
      {
	tree tmpl = TI_TEMPLATE (ti);
	tree pattern = (TYPE_P (*decl) ? TREE_TYPE (tmpl)
			: DECL_TEMPLATE_RESULT (tmpl));
	if (*decl == pattern)
	  TREE_DEPRECATED (tmpl) = true;
      }
}
/* Walks through the namespace- or function-scope anonymous union
   OBJECT, with the indicated TYPE, building appropriate VAR_DECLs.
   Returns one of the fields for use in the mangled name.  */

static tree
build_anon_union_vars (tree type, tree object)
{
  tree main_decl = NULL_TREE;
  tree field;

  /* Rather than write the code to handle the non-union case,
     just give an error.  */
  if (TREE_CODE (type) != UNION_TYPE)
    {
      error_at (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		"anonymous struct not inside named type");
      return error_mark_node;
    }

  for (field = TYPE_FIELDS (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      tree decl;
      tree ref;

      if (DECL_ARTIFICIAL (field))
	continue;
      if (TREE_CODE (field) != FIELD_DECL)
	{
	  permerror (DECL_SOURCE_LOCATION (field),
		     "%q#D invalid; an anonymous union can only "
		     "have non-static data members", field);
	  continue;
	}

      if (TREE_PRIVATE (field))
	permerror (DECL_SOURCE_LOCATION (field),
		   "private member %q#D in anonymous union", field);
      else if (TREE_PROTECTED (field))
	permerror (DECL_SOURCE_LOCATION (field),
		   "protected member %q#D in anonymous union", field);

      /* Build the expression that accesses FIELD within OBJECT; in a
	 template we can only build an unchecked placeholder.  */
      if (processing_template_decl)
	ref = build_min_nt_loc (UNKNOWN_LOCATION, COMPONENT_REF, object,
				DECL_NAME (field), NULL_TREE);
      else
	ref = build_class_member_access_expr (object, field, NULL_TREE,
					      false, tf_warning_or_error);

      if (DECL_NAME (field))
	{
	  tree base;

	  /* Create an alias VAR_DECL so the field's name is visible in
	     the enclosing scope; its value is the member access REF.  */
	  decl = build_decl (input_location,
			     VAR_DECL, DECL_NAME (field), TREE_TYPE (field));
	  DECL_ANON_UNION_VAR_P (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;

	  base = get_base_address (object);
	  TREE_PUBLIC (decl) = TREE_PUBLIC (base);
	  TREE_STATIC (decl) = TREE_STATIC (base);
	  DECL_EXTERNAL (decl) = DECL_EXTERNAL (base);

	  SET_DECL_VALUE_EXPR (decl, ref);
	  DECL_HAS_VALUE_EXPR_P (decl) = 1;

	  decl = pushdecl (decl);
	}
      else if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
	/* Recurse into a nested anonymous aggregate.  */
	decl = build_anon_union_vars (TREE_TYPE (field), ref);
      else
	decl = 0;

      if (main_decl == NULL_TREE)
	main_decl = decl;
    }

  return main_decl;
}
/* Finish off the processing of the anonymous union (or anonymous
   aggregate) whose artificial VAR_DECL is ANON_UNION_DECL: all members
   must be laid out together, member names are made visible via alias
   VAR_DECLs, and one member's name is used for mangling.  */

void
finish_anon_union (tree anon_union_decl)
{
  tree type;
  tree main_decl;
  bool public_p;

  if (anon_union_decl == error_mark_node)
    return;

  type = TREE_TYPE (anon_union_decl);
  public_p = TREE_PUBLIC (anon_union_decl);

  /* The VAR_DECL's context is the same as the TYPE's context.  */
  DECL_CONTEXT (anon_union_decl) = DECL_CONTEXT (TYPE_NAME (type));

  if (TYPE_FIELDS (type) == NULL_TREE)
    return;

  /* [class.union.anon] a namespace-scope anonymous union must have
     internal linkage.  */
  if (public_p)
    {
      error ("namespace-scope anonymous aggregates must be static");
      return;
    }

  main_decl = build_anon_union_vars (type, anon_union_decl);
  if (main_decl == error_mark_node)
    return;
  if (main_decl == NULL_TREE)
    {
      pedwarn (input_location, 0, "anonymous union with no members");
      return;
    }

  if (!processing_template_decl)
    {
      /* Use main_decl to set the mangled name.  */
      DECL_NAME (anon_union_decl) = DECL_NAME (main_decl);
      maybe_commonize_var (anon_union_decl);
      if (TREE_STATIC (anon_union_decl) || DECL_EXTERNAL (anon_union_decl))
	{
	  if (DECL_DISCRIMINATOR_P (anon_union_decl))
	    determine_local_discriminator (anon_union_decl);
	  mangle_decl (anon_union_decl);
	}
      /* The borrowed name was only needed for mangling.  */
      DECL_NAME (anon_union_decl) = NULL_TREE;
    }

  pushdecl (anon_union_decl);
  cp_finish_decl (anon_union_decl, NULL_TREE, false, NULL_TREE, 0);
}
/* Auxiliary functions to make type signatures for
   `operator new' and `operator delete' correspond to
   what compiler will be expecting.  */

/* Coerce the FUNCTION_TYPE of an operator new to return void* and take
   size_t as first parameter, diagnosing deviations at LOC.  Returns the
   (possibly rebuilt) function type.  E encodes what was wrong: 0 = OK,
   1 = wrong return type, 2 = wrong/missing first parameter.  */

tree
coerce_new_type (tree type, location_t loc)
{
  int e = 0;
  tree args = TYPE_ARG_TYPES (type);

  gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);

  if (!same_type_p (TREE_TYPE (type), ptr_type_node))
    {
      e = 1;
      error_at (loc, "%<operator new%> must return type %qT",
		ptr_type_node);
    }

  if (args && args != void_list_node)
    {
      if (TREE_PURPOSE (args))
	{
	  /* [basic.stc.dynamic.allocation]
	     
	     The first parameter shall not have an associated default
	     argument.  */
	  error_at (loc, "the first parameter of %<operator new%> cannot "
		    "have a default argument");
	  /* Throw away the default argument.  */
	  TREE_PURPOSE (args) = NULL_TREE;
	}

      if (!same_type_p (TREE_VALUE (args), size_type_node))
	{
	  e = 2;
	  /* Drop the bad first parameter; a size_t one is prepended
	     below.  */
	  args = TREE_CHAIN (args);
	}
    }
  else
    e = 2;

  if (e == 2)
    permerror (loc, "%<operator new%> takes type %<size_t%> (%qT) "
	       "as first parameter", size_type_node);

  switch (e)
    {
      case 2:
	args = tree_cons (NULL_TREE, size_type_node, args);
	/* Fall through.  */
      case 1:
	type = (cxx_copy_lang_qualifiers
		(build_function_type (ptr_type_node, args),
		 type));
	/* Fall through.  */
      default:;
    }
  return type;
}
/* Coerce the type of the operator delete DECL to return void and take a
   suitable pointer as first parameter (C* for a destroying delete in
   class C, void* otherwise), diagnosing deviations at LOC.  E encodes
   what was wrong: 0 = OK, 1 = wrong return type, 2 = wrong/missing
   first parameter.  */

void
coerce_delete_type (tree decl, location_t loc)
{
  int e = 0;
  tree type = TREE_TYPE (decl);
  tree args = TYPE_ARG_TYPES (type);

  gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);

  if (!same_type_p (TREE_TYPE (type), void_type_node))
    {
      e = 1;
      error_at (loc, "%<operator delete%> must return type %qT",
		void_type_node);
    }

  tree ptrtype = ptr_type_node;
  if (destroying_delete_p (decl))
    {
      if (DECL_CLASS_SCOPE_P (decl))
	/* If the function is a destroying operator delete declared in class
	   type C, the type of its first parameter shall be C*.  */
	ptrtype = build_pointer_type (DECL_CONTEXT (decl));
      else
	/* A destroying operator delete shall be a class member function named
	   operator delete.  */
	error_at (loc,
		  "destroying %<operator delete%> must be a member function");
      const ovl_op_info_t *op = IDENTIFIER_OVL_OP_INFO (DECL_NAME (decl));
      if (op->flags & OVL_OP_FLAG_VEC)
	error_at (loc, "%<operator delete[]%> cannot be a destroying delete");
      if (!usual_deallocation_fn_p (decl))
	error_at (loc, "destroying %<operator delete%> must be a usual "
		  "deallocation function");
    }

  if (!args || args == void_list_node
      || !same_type_p (TREE_VALUE (args), ptrtype))
    {
      e = 2;
      /* Drop the bad first parameter; a PTRTYPE one is prepended
	 below.  */
      if (args && args != void_list_node)
	args = TREE_CHAIN (args);
      error_at (loc, "%<operator delete%> takes type %qT as first parameter",
		ptrtype);
    }
  switch (e)
    {
      case 2:
	args = tree_cons (NULL_TREE, ptrtype, args);
	/* Fall through.  */
      case 1:
	type = (cxx_copy_lang_qualifiers
		(build_function_type (void_type_node, args),
		 type));
	/* Fall through.  */
      default:;
    }

  TREE_TYPE (decl) = type;
}
/* DECL is a VAR_DECL for a vtable: walk through the entries in the vtable
   and mark them as needed.  Vtables that contain consteval (immediate)
   functions are recorded in CONSTEVAL_VTABLES so those entries can be
   nulled out later by clear_consteval_vfns.  */

static void
mark_vtable_entries (tree decl, vec<tree> &consteval_vtables)
{
  tree fnaddr;
  unsigned HOST_WIDE_INT idx;

  /* It's OK for the vtable to refer to deprecated virtual functions.  */
  warning_sentinel w(warn_deprecated_decl);

  bool consteval_seen = false;

  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (DECL_INITIAL (decl)),
			      idx, fnaddr)
    {
      tree fn;

      STRIP_NOPS (fnaddr);

      if (TREE_CODE (fnaddr) != ADDR_EXPR
	  && TREE_CODE (fnaddr) != FDESC_EXPR)
	/* This entry is an offset: a virtual base class offset, a
	   virtual call offset, an RTTI offset, etc.  */
	continue;

      fn = TREE_OPERAND (fnaddr, 0);
      if (TREE_CODE (fn) == FUNCTION_DECL && DECL_IMMEDIATE_FUNCTION_P (fn))
	{
	  /* Record this vtable once; the consteval slot itself must not
	     be marked used or emitted.  */
	  if (!consteval_seen)
	    {
	      consteval_seen = true;
	      consteval_vtables.safe_push (decl);
	    }
	  continue;
	}
      TREE_ADDRESSABLE (fn) = 1;
      /* When we don't have vcall offsets, we output thunks whenever
	 we output the vtables that contain them.  With vcall offsets,
	 we know all the thunks we'll need when we emit a virtual
	 function, so we emit the thunks there instead.  */
      if (DECL_THUNK_P (fn))
	use_thunk (fn, /*emit_p=*/0);
      /* Set the location, as marking the function could cause
	 instantiation.  We do not need to preserve the incoming
	 location, as we're called from c_parse_final_cleanups, which
	 takes care of that.  */
      input_location = DECL_SOURCE_LOCATION (fn);
      mark_used (fn);
    }
}
/* Replace any consteval functions in vtables with null pointers.  */

static void
clear_consteval_vfns (vec<tree> &consteval_vtables)
{
  for (tree vtable : consteval_vtables)
    {
      /* Scan the vtable initializer and zero out every slot whose value
	 resolves to an immediate (consteval) function.  */
      for (constructor_elt &slot : *CONSTRUCTOR_ELTS (DECL_INITIAL (vtable)))
	{
	  tree fn = cp_get_fndecl_from_callee (slot.value, /*fold*/false);
	  if (fn == NULL_TREE || !DECL_IMMEDIATE_FUNCTION_P (fn))
	    continue;
	  slot.value = build_zero_cst (vtable_entry_type);
	}
    }
}
/* Adjust the TLS model on variable DECL if need be, typically after
   the linkage of DECL has been modified.  */

static void
adjust_var_decl_tls_model (tree decl)
{
  if (!CP_DECL_THREAD_LOCAL_P (decl))
    return;
  /* An explicit tls_model attribute wins over the default.  */
  if (lookup_attribute ("tls_model", DECL_ATTRIBUTES (decl)))
    return;
  set_decl_tls_model (decl, decl_default_tls_model (decl));
}
/* Set DECL up to have the closest approximation of "initialized common"
   linkage available: a real COMDAT group when weak symbols are supported,
   otherwise fall back to static emission or DECL_COMMON as appropriate.  */

void
comdat_linkage (tree decl)
{
  if (flag_weak)
    make_decl_one_only (decl, cxx_comdat_group (decl));
  else if (TREE_CODE (decl) == FUNCTION_DECL
	   || (VAR_P (decl) && DECL_ARTIFICIAL (decl)))
    /* We can just emit function and compiler-generated variables
       statically; having multiple copies is (for the most part) only
       a waste of space.

       There are two correctness issues, however: the address of a
       template instantiation with external linkage should be the
       same, independent of what translation unit asks for the
       address, and this will not hold when we emit multiple copies of
       the function.  However, there's little else we can do.

       Also, by default, the typeinfo implementation assumes that
       there will be only one copy of the string used as the name for
       each type.  Therefore, if weak symbols are unavailable, the
       run-time library should perform a more conservative check; it
       should perform a string comparison, rather than an address
       comparison.  */
    TREE_PUBLIC (decl) = 0;
  else
    {
      /* Static data member template instantiations, however, cannot
	 have multiple copies.  */
      if (DECL_INITIAL (decl) == 0
	  || DECL_INITIAL (decl) == error_mark_node)
	DECL_COMMON (decl) = 1;
      else if (EMPTY_CONSTRUCTOR_P (DECL_INITIAL (decl)))
	{
	  /* An empty initializer is as good as none for commoning.  */
	  DECL_COMMON (decl) = 1;
	  DECL_INITIAL (decl) = error_mark_node;
	}
      else if (!DECL_EXPLICIT_INSTANTIATION (decl))
	{
	  /* We can't do anything useful; leave vars for explicit
	     instantiation.  */
	  DECL_EXTERNAL (decl) = 1;
	  DECL_NOT_REALLY_EXTERN (decl) = 0;
	}
    }

  if (TREE_PUBLIC (decl))
    DECL_COMDAT (decl) = 1;

  /* Linkage changes may require a different TLS model.  */
  if (VAR_P (decl))
    adjust_var_decl_tls_model (decl);
}
/* For win32 we also want to put explicit instantiations in
   linkonce sections, so that they will be merged with implicit
   instantiations; otherwise we get duplicate symbol errors.
   For Darwin we do not want explicit instantiations to be
   linkonce.  */

void
maybe_make_one_only (tree decl)
{
  /* We used to say that this was not necessary on targets that support weak
     symbols, because the implicit instantiations will defer to the explicit
     one.  However, that's not actually the case in SVR4; a strong definition
     after a weak one is an error.  Also, not making explicit
     instantiations one_only means that we can end up with two copies of
     some template instantiations.  */
  if (! flag_weak)
    return;

  /* We can't set DECL_COMDAT on functions, or cp_finish_file will think
     we can get away with not emitting them if they aren't used.  We need
     to for variables so that cp_finish_decl will update their linkage,
     because their DECL_INITIAL may not have been set properly yet.  */

  if (!TARGET_WEAK_NOT_IN_ARCHIVE_TOC
      || (! DECL_EXPLICIT_INSTANTIATION (decl)
	  && ! DECL_TEMPLATE_SPECIALIZATION (decl)))
    {
      make_decl_one_only (decl, cxx_comdat_group (decl));

      if (VAR_P (decl))
	{
	  varpool_node *node = varpool_node::get_create (decl);
	  DECL_COMDAT (decl) = 1;
	  /* Mark it needed so we don't forget to emit it.  */
	  node->forced_by_abi = true;
	  TREE_USED (decl) = 1;

	  /* COMDAT linkage may change the preferred TLS model.  */
	  adjust_var_decl_tls_model (decl);
	}
    }
}
/* Returns true iff DECL, a FUNCTION_DECL or VAR_DECL, has vague linkage.
   This predicate will give the right answer during parsing of the
   function, which other tests may not.  */

bool
vague_linkage_p (tree decl)
{
  if (!TREE_PUBLIC (decl))
    {
      /* maybe_thunk_body clears TREE_PUBLIC and DECL_ABSTRACT_P on the
	 maybe-in-charge 'tor variants; in that case we need to check one of
	 the "clones" for the real linkage.  But only in that case; before
	 maybe_clone_body we haven't yet copied the linkage to the clones.  */
      if (DECL_MAYBE_IN_CHARGE_CDTOR_P (decl)
	  && !DECL_ABSTRACT_P (decl)
	  && DECL_CHAIN (decl)
	  && DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)))
	return vague_linkage_p (DECL_CHAIN (decl));

      gcc_checking_assert (!DECL_COMDAT (decl));
      return false;
    }
  /* Unfortunately, import_export_decl has not always been called
     before the function is processed, so we cannot simply check
     DECL_COMDAT.  */
  if (DECL_COMDAT (decl)
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_DECLARED_INLINE_P (decl))
      || (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INSTANTIATION (decl))
      || (VAR_P (decl) && DECL_INLINE_VAR_P (decl)))
    return true;
  else if (DECL_FUNCTION_SCOPE_P (decl))
    /* A local static in an inline effectively has vague linkage.  */
    return (TREE_STATIC (decl)
	    && vague_linkage_p (DECL_CONTEXT (decl)));
  else
    return false;
}
/* Determine whether or not we want to specifically import or export CTYPE,
   using various heuristics.  */

static void
import_export_class (tree ctype)
{
  /* -1 for imported, 1 for exported.  */
  int import_export = 0;

  /* It only makes sense to call this function at EOF.  The reason is
     that this function looks at whether or not the first non-inline
     non-abstract virtual member function has been defined in this
     translation unit.  But, we can't possibly know that until we've
     seen the entire translation unit.  */
  gcc_assert (at_eof);

  /* The decision has already been made; don't revisit it.  */
  if (CLASSTYPE_INTERFACE_KNOWN (ctype))
    return;

  /* If MULTIPLE_SYMBOL_SPACES is set and we saw a #pragma interface,
     we will have CLASSTYPE_INTERFACE_ONLY set but not
     CLASSTYPE_INTERFACE_KNOWN.  In that case, we don't want to use this
     heuristic because someone will supply a #pragma implementation
     elsewhere, and deducing it here would produce a conflict.  */
  if (CLASSTYPE_INTERFACE_ONLY (ctype))
    return;

  /* Explicit dllimport/dllexport attributes take precedence over all
     other heuristics.  */
  if (lookup_attribute ("dllimport", TYPE_ATTRIBUTES (ctype)))
    import_export = -1;
  else if (lookup_attribute ("dllexport", TYPE_ATTRIBUTES (ctype)))
    import_export = 1;
  else if (CLASSTYPE_IMPLICIT_INSTANTIATION (ctype)
	   && !flag_implicit_templates)
    /* For a template class, without -fimplicit-templates, check the
       repository.  If the virtual table is assigned to this
       translation unit, then export the class; otherwise, import
       it.  */
    import_export = -1;
  else if (TYPE_POLYMORPHIC_P (ctype))
    {
      /* The ABI specifies that the virtual table and associated
	 information are emitted with the key method, if any.  */
      tree method = CLASSTYPE_KEY_METHOD (ctype);
      /* If weak symbol support is not available, then we must be
	 careful not to emit the vtable when the key function is
	 inline.  An inline function can be defined in multiple
	 translation units.  If we were to emit the vtable in each
	 translation unit containing a definition, we would get
	 multiple definition errors at link-time.  */
      if (method && (flag_weak || ! DECL_DECLARED_INLINE_P (method)))
	import_export = (DECL_REALLY_EXTERN (method) ? -1 : 1);
    }

  /* When MULTIPLE_SYMBOL_SPACES is set, we cannot count on seeing
     a definition anywhere else.  */
  if (MULTIPLE_SYMBOL_SPACES && import_export == -1)
    import_export = 0;

  /* Allow back ends the chance to overrule the decision.  */
  if (targetm.cxx.import_export_class)
    import_export = targetm.cxx.import_export_class (ctype, import_export);

  if (import_export)
    {
      SET_CLASSTYPE_INTERFACE_KNOWN (ctype);
      CLASSTYPE_INTERFACE_ONLY (ctype) = (import_export < 0);
    }
}
/* Return true if VAR has already been provided to the back end; in that
   case VAR should not be modified further by the front end.  */

static bool
var_finalized_p (tree var)
{
  varpool_node *node = varpool_node::get_create (var);
  return node->definition;
}
/* DECL is a VAR_DECL or FUNCTION_DECL which, for whatever reason,
   must be emitted in this translation unit.  Mark it as such.  */

void
mark_needed (tree decl)
{
  TREE_USED (decl) = 1;
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Extern inline functions don't become needed when referenced.
	 If we know a method will be emitted in other TU and no new
	 functions can be marked reachable, just use the external
	 definition.  */
      struct cgraph_node *node = cgraph_node::get_create (decl);
      node->forced_by_abi = true;

      /* #pragma interface can call mark_needed for
	 maybe-in-charge 'tors; mark the clones as well.  */
      tree clone;
      FOR_EACH_CLONE (clone, decl)
	mark_needed (clone);
    }
  else if (VAR_P (decl))
    {
      varpool_node *node = varpool_node::get_create (decl);
      /* C++ frontend use mark_decl_references to force COMDAT variables
	 to be output that might appear dead otherwise.  */
      node->forced_by_abi = true;
    }
}
/* DECL is either a FUNCTION_DECL or a VAR_DECL.  This function
   returns true if a definition of this entity should be provided in
   this object file.  Callers use this function to determine whether
   or not to let the back end know that a definition of DECL is
   available in this translation unit.  */

bool
decl_needed_p (tree decl)
{
  gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
  /* This function should only be called at the end of the translation
     unit.  We cannot be sure of whether or not something will be
     COMDAT until that point.  */
  gcc_assert (at_eof);

  /* All entities with external linkage that are not COMDAT/EXTERN should be
     emitted; they may be referred to from other object files.  */
  if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_REALLY_EXTERN (decl))
    return true;

  /* Functions marked "dllexport" must be emitted so that they are
     visible to other DLLs.  */
  if (flag_keep_inline_dllexport
      && lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)))
    return true;

  /* When not optimizing, do not bother to produce definitions for extern
     symbols.  */
  if (DECL_REALLY_EXTERN (decl)
      && ((TREE_CODE (decl) != FUNCTION_DECL
	   && !optimize)
	  || (TREE_CODE (decl) == FUNCTION_DECL
	      && !opt_for_fn (decl, optimize)))
      /* lookup_attribute walks a TREE_LIST of attributes; it must be
	 given DECL_ATTRIBUTES (decl), not the decl itself, or the
	 "always_inline" attribute is never found.  */
      && !lookup_attribute ("always_inline", DECL_ATTRIBUTES (decl)))
    return false;

  /* If this entity was used, let the back end see it; it will decide
     whether or not to emit it into the object file.  */
  if (TREE_USED (decl))
    return true;

  /* Virtual functions might be needed for devirtualization.  */
  if (flag_devirtualize
      && TREE_CODE (decl) == FUNCTION_DECL
      && DECL_VIRTUAL_P (decl))
    return true;

  /* Otherwise, DECL does not need to be emitted -- yet.  A subsequent
     reference to DECL might cause it to be emitted later.  */
  return false;
}
/* If necessary, write out the vtables for the dynamic class CTYPE.
   Returns true if any vtables were emitted.  */

static bool
maybe_emit_vtables (tree ctype, vec<tree> &consteval_vtables)
{
  tree vtbl;
  tree primary_vtbl;
  int needed = 0;
  /* Used to chain all the class's COMDAT vtables into one group.  */
  varpool_node *current = NULL, *last = NULL;

  /* If the vtables for this class have already been emitted there is
     nothing more to do.  */
  primary_vtbl = CLASSTYPE_VTABLES (ctype);
  if (var_finalized_p (primary_vtbl))
    return false;
  /* Ignore dummy vtables made by get_vtable_decl.  */
  if (TREE_TYPE (primary_vtbl) == void_type_node)
    return false;

  /* On some targets, we cannot determine the key method until the end
     of the translation unit -- which is when this function is
     called.  */
  if (!targetm.cxx.key_method_may_be_inline ())
    determine_key_method (ctype);

  /* See if any of the vtables are needed.  */
  for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl))
    {
      import_export_decl (vtbl);
      if (DECL_NOT_REALLY_EXTERN (vtbl) && decl_needed_p (vtbl))
	needed = 1;
    }
  if (!needed)
    {
      /* If the references to this class' vtables are optimized away,
	 still emit the appropriate debugging information.  See
	 dfs_debug_mark.  */
      if (DECL_COMDAT (primary_vtbl)
	  && CLASSTYPE_DEBUG_REQUESTED (ctype))
	note_debug_info_needed (ctype);
      return false;
    }

  /* The ABI requires that we emit all of the vtables if we emit any
     of them.  */
  for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl))
    {
      /* Mark entities references from the virtual table as used.  */
      mark_vtable_entries (vtbl, consteval_vtables);

      /* A null initializer type means the initializer has not been
	 digested yet; do that now.  */
      if (TREE_TYPE (DECL_INITIAL (vtbl)) == 0)
	{
	  vec<tree, va_gc> *cleanups = NULL;
	  tree expr = store_init_value (vtbl, DECL_INITIAL (vtbl), &cleanups,
					LOOKUP_NORMAL);

	  /* It had better be all done at compile-time.  */
	  gcc_assert (!expr && !cleanups);
	}

      /* Write it out.  */
      DECL_EXTERNAL (vtbl) = 0;
      rest_of_decl_compilation (vtbl, 1, 1);

      /* Because we're only doing syntax-checking, we'll never end up
	 actually marking the variable as written.  */
      if (flag_syntax_only)
	TREE_ASM_WRITTEN (vtbl) = 1;
      else if (DECL_ONE_ONLY (vtbl))
	{
	  /* Put all of the class's one-only vtables in the same
	     comdat group.  */
	  current = varpool_node::get_create (vtbl);
	  if (last)
	    current->add_to_same_comdat_group (last);
	  last = current;
	}
    }

  /* For abstract classes, the destructor has been removed from the
     vtable (in class.c's build_vtbl_initializer).  For a compiler-
     generated destructor, it hence might not have been generated in
     this translation unit - and with '#pragma interface' it might
     never get generated.  */
  if (CLASSTYPE_PURE_VIRTUALS (ctype)
      && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (ctype)
      && !CLASSTYPE_LAZY_DESTRUCTOR (ctype)
      && DECL_DEFAULTED_IN_CLASS_P (CLASSTYPE_DESTRUCTOR (ctype)))
    note_vague_linkage_fn (CLASSTYPE_DESTRUCTOR (ctype));

  /* Since we're writing out the vtable here, also write the debug
     info.  */
  note_debug_info_needed (ctype);

  return true;
}
/* A special return value from type_visibility meaning internal
linkage. */
enum { VISIBILITY_ANON = VISIBILITY_INTERNAL+1 };
static int expr_visibility (tree);
static int type_visibility (tree);
/* walk_tree helper function for type_visibility.  Accumulates into
   *(int *)DATA the most constrained visibility (largest enum value)
   seen among the types walked.  */

static tree
min_vis_r (tree *tp, int *walk_subtrees, void *data)
{
  int *vis_p = (int *)data;
  int this_vis = VISIBILITY_DEFAULT;
  if (! TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (typedef_variant_p (*tp))
    /* Look through typedefs despite cp_walk_subtrees.  */
    this_vis = type_visibility (DECL_ORIGINAL_TYPE (TYPE_NAME (*tp)));
  else if (OVERLOAD_TYPE_P (*tp)
	   && !TREE_PUBLIC (TYPE_MAIN_DECL (*tp)))
    {
      /* A type with internal linkage constrains everything that
	 uses it.  */
      this_vis = VISIBILITY_ANON;
      *walk_subtrees = 0;
    }
  else if (CLASS_TYPE_P (*tp))
    {
      this_vis = CLASSTYPE_VISIBILITY (*tp);
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (*tp) == ARRAY_TYPE
	   && uses_template_parms (TYPE_DOMAIN (*tp)))
    this_vis = expr_visibility (TYPE_MAX_VALUE (TYPE_DOMAIN (*tp)));

  /* Record the most constrained visibility seen so far.  */
  if (this_vis > *vis_p)
    *vis_p = this_vis;

  return NULL;
}
/* walk_tree helper function for expr_visibility.  Accumulates into
   *(int *)DATA the most constrained visibility (largest enum value)
   of the types and decls named in the expression.  */

static tree
min_vis_expr_r (tree *tp, int */*walk_subtrees*/, void *data)
{
  int *vis_p = (int *)data;
  int tpvis = VISIBILITY_DEFAULT;

  switch (TREE_CODE (*tp))
    {
    /* For all these, the visibility comes from the expression's
       type.  */
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case NEW_EXPR:
    case CONSTRUCTOR:
    case LAMBDA_EXPR:
      tpvis = type_visibility (TREE_TYPE (*tp));
      break;

    case VAR_DECL:
    case FUNCTION_DECL:
      /* A referenced entity with internal linkage constrains the
	 expression to internal linkage too.  */
      if (! TREE_PUBLIC (*tp))
	tpvis = VISIBILITY_ANON;
      else
	tpvis = DECL_VISIBILITY (*tp);
      break;

    default:
      break;
    }

  if (tpvis > *vis_p)
    *vis_p = tpvis;

  return NULL_TREE;
}
/* Returns the visibility of TYPE, which is the minimum visibility of its
   component types.  */

static int
type_visibility (tree type)
{
  int result = VISIBILITY_DEFAULT;
  /* min_vis_r raises RESULT to the most constrained visibility seen.  */
  cp_walk_tree_without_duplicates (&type, min_vis_r, &result);
  return result;
}
/* Returns the visibility of an expression EXPR that appears in the signature
   of a function template, which is the minimum visibility of names that appear
   in its mangling.  */

static int
expr_visibility (tree expr)
{
  int result = VISIBILITY_DEFAULT;
  /* min_vis_expr_r raises RESULT to the most constrained visibility
     among the entities the expression names.  */
  cp_walk_tree_without_duplicates (&expr, min_vis_expr_r, &result);
  return result;
}
/* Limit the visibility of DECL to VISIBILITY, if not explicitly
   specified (or if VISIBILITY is static).  If TMPL is true, this
   constraint is for a template argument, and takes precedence
   over explicitly-specified visibility on the template.  */

static void
constrain_visibility (tree decl, int visibility, bool tmpl)
{
  if (visibility == VISIBILITY_ANON)
    {
      /* extern "C" declarations aren't affected by the anonymous
	 namespace.  */
      if (!DECL_EXTERN_C_P (decl))
	{
	  /* Force internal linkage: clear every flag that implies the
	     symbol may be shared across translation units.  */
	  TREE_PUBLIC (decl) = 0;
	  DECL_WEAK (decl) = 0;
	  DECL_COMMON (decl) = 0;
	  DECL_COMDAT (decl) = false;
	  if (VAR_OR_FUNCTION_DECL_P (decl))
	    {
	      struct symtab_node *snode = symtab_node::get (decl);

	      if (snode)
		snode->set_comdat_group (NULL);
	    }
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  if (DECL_LANG_SPECIFIC (decl))
	    DECL_NOT_REALLY_EXTERN (decl) = 1;
	}
    }
  else if (visibility > DECL_VISIBILITY (decl)
	   && (tmpl || !DECL_VISIBILITY_SPECIFIED (decl)))
    {
      DECL_VISIBILITY (decl) = (enum symbol_visibility) visibility;
      /* This visibility was not specified.  */
      DECL_VISIBILITY_SPECIFIED (decl) = false;
    }
}
/* Constrain the visibility of DECL based on the visibility of its template
   arguments.  */

static void
constrain_visibility_for_template (tree decl, tree targs)
{
  /* If this is a template instantiation, check the innermost
     template args for visibility constraints.  The outer template
     args are covered by the class check.  */
  tree args = INNERMOST_TEMPLATE_ARGS (targs);
  for (int idx = TREE_VEC_LENGTH (args) - 1; idx >= 0; --idx)
    {
      tree arg = TREE_VEC_ELT (args, idx);
      int vis = TYPE_P (arg) ? type_visibility (arg) : expr_visibility (arg);
      if (vis)
	constrain_visibility (decl, vis, true);
    }
}
/* Like c_determine_visibility, but with additional C++-specific
   behavior.

   Function-scope entities can rely on the function's visibility because
   it is set in start_preparsed_function.

   Class-scope entities cannot rely on the class's visibility until the end
   of the enclosing class definition.

   Note that because namespaces have multiple independent definitions,
   namespace visibility is handled elsewhere using the #pragma visibility
   machinery rather than by decorating the namespace declaration.

   The goal is for constraints from the type to give a diagnostic, and
   other constraints to be applied silently.  */

void
determine_visibility (tree decl)
{
  /* Remember that all decls get VISIBILITY_DEFAULT when built.  */

  /* Only relevant for names with external linkage.  */
  if (!TREE_PUBLIC (decl))
    return;

  /* Cloned constructors and destructors get the same visibility as
     the underlying function.  That should be set up in
     maybe_clone_body.  */
  gcc_assert (!DECL_CLONED_FUNCTION_P (decl));

  /* Saved so we can tell at the end whether anything changed and the
     symbol flags need refreshing.  */
  bool orig_visibility_specified = DECL_VISIBILITY_SPECIFIED (decl);
  enum symbol_visibility orig_visibility = DECL_VISIBILITY (decl);

  /* The decl may be a template instantiation, which could influence
     visibility.  */
  tree template_decl = NULL_TREE;
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      if (CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  if (CLASSTYPE_USE_TEMPLATE (TREE_TYPE (decl)))
	    template_decl = decl;
	}
      else if (TYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
	template_decl = decl;
    }
  else if (DECL_LANG_SPECIFIC (decl) && DECL_USE_TEMPLATE (decl))
    template_decl = decl;

  if (TREE_CODE (decl) == TYPE_DECL
      && LAMBDA_TYPE_P (TREE_TYPE (decl))
      && CLASSTYPE_LAMBDA_EXPR (TREE_TYPE (decl)) != error_mark_node)
    if (tree extra = LAMBDA_TYPE_EXTRA_SCOPE (TREE_TYPE (decl)))
      {
	/* The lambda's visibility is limited by that of its extra
	   scope.  */
	int vis = 0;
	if (TYPE_P (extra))
	  vis = type_visibility (extra);
	else
	  vis = expr_visibility (extra);
	constrain_visibility (decl, vis, false);
      }

  /* If DECL is a member of a class, visibility specifiers on the
     class can influence the visibility of the DECL.  */
  tree class_type = NULL_TREE;
  if (DECL_CLASS_SCOPE_P (decl))
    class_type = DECL_CONTEXT (decl);
  else
    {
      /* Not a class member.  */

      /* Virtual tables have DECL_CONTEXT set to their associated class,
	 so they are automatically handled above.  */
      gcc_assert (!VAR_P (decl)
		  || !DECL_VTABLE_OR_VTT_P (decl));

      if (DECL_FUNCTION_SCOPE_P (decl) && ! DECL_VISIBILITY_SPECIFIED (decl))
	{
	  /* Local statics and classes get the visibility of their
	     containing function by default, except that
	     -fvisibility-inlines-hidden doesn't affect them.  */
	  tree fn = DECL_CONTEXT (decl);
	  if (DECL_VISIBILITY_SPECIFIED (fn))
	    {
	      DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
	      DECL_VISIBILITY_SPECIFIED (decl) =
		DECL_VISIBILITY_SPECIFIED (fn);
	    }
	  else
	    {
	      if (DECL_CLASS_SCOPE_P (fn))
		determine_visibility_from_class (decl, DECL_CONTEXT (fn));
	      else if (determine_hidden_inline (fn))
		{
		  DECL_VISIBILITY (decl) = default_visibility;
		  DECL_VISIBILITY_SPECIFIED (decl) =
		    visibility_options.inpragma;
		}
	      else
		{
		  DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
		  DECL_VISIBILITY_SPECIFIED (decl) =
		    DECL_VISIBILITY_SPECIFIED (fn);
		}
	    }

	  /* Local classes in templates have CLASSTYPE_USE_TEMPLATE set,
	     but have no TEMPLATE_INFO, so don't try to check it.  */
	  template_decl = NULL_TREE;
	}
      else if (VAR_P (decl) && DECL_TINFO_P (decl)
	       && flag_visibility_ms_compat)
	{
	  /* Under -fvisibility-ms-compat, types are visible by default,
	     even though their contents aren't.  */
	  tree underlying_type = TREE_TYPE (DECL_NAME (decl));
	  int underlying_vis = type_visibility (underlying_type);
	  if (underlying_vis == VISIBILITY_ANON
	      || (CLASS_TYPE_P (underlying_type)
		  && CLASSTYPE_VISIBILITY_SPECIFIED (underlying_type)))
	    constrain_visibility (decl, underlying_vis, false);
	  else
	    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
	}
      else if (VAR_P (decl) && DECL_TINFO_P (decl))
	{
	  /* tinfo visibility is based on the type it's for.  */
	  constrain_visibility
	    (decl, type_visibility (TREE_TYPE (DECL_NAME (decl))), false);

	  /* Give the target a chance to override the visibility associated
	     with DECL.  */
	  if (TREE_PUBLIC (decl)
	      && !DECL_REALLY_EXTERN (decl)
	      && CLASS_TYPE_P (TREE_TYPE (DECL_NAME (decl)))
	      && !CLASSTYPE_VISIBILITY_SPECIFIED (TREE_TYPE (DECL_NAME (decl))))
	    targetm.cxx.determine_class_data_visibility (decl);
	}
      else if (template_decl)
	/* Template instantiations and specializations get visibility based
	   on their template unless they override it with an attribute.  */;
      else if (! DECL_VISIBILITY_SPECIFIED (decl))
	{
	  if (determine_hidden_inline (decl))
	    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	  else
	    {
	      /* Set default visibility to whatever the user supplied with
		 #pragma GCC visibility or a namespace visibility attribute.  */
	      DECL_VISIBILITY (decl) = default_visibility;
	      DECL_VISIBILITY_SPECIFIED (decl) = visibility_options.inpragma;
	    }
	}
    }

  if (template_decl)
    {
      /* If the specialization doesn't specify visibility, use the
	 visibility from the template.  */
      tree tinfo = get_template_info (template_decl);
      tree args = TI_ARGS (tinfo);
      tree attribs = (TREE_CODE (decl) == TYPE_DECL
		      ? TYPE_ATTRIBUTES (TREE_TYPE (decl))
		      : DECL_ATTRIBUTES (decl));
      tree attr = lookup_attribute ("visibility", attribs);

      if (args != error_mark_node)
	{
	  tree pattern = DECL_TEMPLATE_RESULT (TI_TEMPLATE (tinfo));

	  if (!DECL_VISIBILITY_SPECIFIED (decl))
	    {
	      if (!attr
		  && determine_hidden_inline (decl))
		DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	      else
		{
		  DECL_VISIBILITY (decl) = DECL_VISIBILITY (pattern);
		  DECL_VISIBILITY_SPECIFIED (decl)
		    = DECL_VISIBILITY_SPECIFIED (pattern);
		}
	    }

	  if (args
	      /* Template argument visibility outweighs #pragma or namespace
		 visibility, but not an explicit attribute.  */
	      && !attr)
	    {
	      int depth = TMPL_ARGS_DEPTH (args);
	      if (DECL_VISIBILITY_SPECIFIED (decl))
		{
		  /* A class template member with explicit visibility
		     overrides the class visibility, so we need to apply
		     all the levels of template args directly.  */
		  int i;
		  for (i = 1; i <= depth; ++i)
		    {
		      tree lev = TMPL_ARGS_LEVEL (args, i);
		      constrain_visibility_for_template (decl, lev);
		    }
		}
	      else if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)))
		/* Limit visibility based on its template arguments.  */
		constrain_visibility_for_template (decl, args);
	    }
	}
    }

  if (class_type)
    determine_visibility_from_class (decl, class_type);

  if (decl_anon_ns_mem_p (decl))
    /* Names in an anonymous namespace get internal linkage.  */
    constrain_visibility (decl, VISIBILITY_ANON, false);
  else if (TREE_CODE (decl) != TYPE_DECL)
    {
      /* Propagate anonymity from type to decl.  */
      int tvis = type_visibility (TREE_TYPE (decl));
      if (tvis == VISIBILITY_ANON
	  || ! DECL_VISIBILITY_SPECIFIED (decl))
	constrain_visibility (decl, tvis, false);
    }
  else if (no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/true))
    /* DR 757: A type without linkage shall not be used as the type of a
       variable or function with linkage, unless
       o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
       o the variable or function is not used (3.2 [basic.def.odr]) or is
       defined in the same translation unit.

       Since non-extern "C" decls need to be defined in the same
       translation unit, we can make the type internal.  */
    constrain_visibility (decl, VISIBILITY_ANON, false);

  /* If visibility changed and DECL already has DECL_RTL, ensure
     symbol flags are updated.  */
  if ((DECL_VISIBILITY (decl) != orig_visibility
       || DECL_VISIBILITY_SPECIFIED (decl) != orig_visibility_specified)
      && ((VAR_P (decl) && TREE_STATIC (decl))
	  || TREE_CODE (decl) == FUNCTION_DECL)
      && DECL_RTL_SET_P (decl))
    make_decl_rtl (decl);
}
/* By default, static data members and function members receive
   the visibility of their containing class.  */

static void
determine_visibility_from_class (tree decl, tree class_type)
{
  /* An explicit visibility on the member itself always wins.  */
  if (DECL_VISIBILITY_SPECIFIED (decl))
    return;

  if (determine_hidden_inline (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    {
      /* Default to the class visibility.  */
      DECL_VISIBILITY (decl) = CLASSTYPE_VISIBILITY (class_type);
      DECL_VISIBILITY_SPECIFIED (decl)
	= CLASSTYPE_VISIBILITY_SPECIFIED (class_type);
    }

  /* Give the target a chance to override the visibility associated
     with DECL.  */
  if (VAR_P (decl)
      && TREE_PUBLIC (decl)
      && (DECL_TINFO_P (decl) || DECL_VTABLE_OR_VTT_P (decl))
      && !DECL_REALLY_EXTERN (decl)
      && !CLASSTYPE_VISIBILITY_SPECIFIED (class_type))
    targetm.cxx.determine_class_data_visibility (decl);
}
/* Returns true iff DECL is an inline that should get hidden visibility
   because of -fvisibility-inlines-hidden.  */

static bool
determine_hidden_inline (tree decl)
{
  if (!visibility_options.inlines_hidden)
    return false;
  /* Don't do this for inline templates; specializations might not be
     inline, and we don't want them to inherit the hidden
     visibility.  We'll set it here for all inline instantiations.  */
  if (processing_template_decl)
    return false;
  if (TREE_CODE (decl) != FUNCTION_DECL
      || !DECL_DECLARED_INLINE_P (decl))
    return false;
  return (!DECL_LANG_SPECIFIC (decl)
	  || !DECL_EXPLICIT_INSTANTIATION (decl));
}
/* Constrain the visibility of a class TYPE based on the visibility of its
   field types.  Warn if any fields require lesser visibility.  */

void
constrain_class_visibility (tree type)
{
  tree binfo;
  tree t;
  int i;

  int vis = type_visibility (type);

  if (vis == VISIBILITY_ANON
      || DECL_IN_SYSTEM_HEADER (TYPE_MAIN_DECL (type)))
    return;

  /* Don't warn about visibility if the class has explicit visibility.  */
  if (CLASSTYPE_VISIBILITY_SPECIFIED (type))
    vis = VISIBILITY_INTERNAL;

  /* Check each non-artificial field against the class's own
     visibility.  */
  for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
    if (TREE_CODE (t) == FIELD_DECL && TREE_TYPE (t) != error_mark_node
	&& !DECL_ARTIFICIAL (t))
      {
	tree ftype = strip_pointer_or_array_types (TREE_TYPE (t));
	int subvis = type_visibility (ftype);

	if (subvis == VISIBILITY_ANON)
	  {
	    if (!in_main_input_context())
	      {
		tree nlt = no_linkage_check (ftype, /*relaxed_p=*/false);
		if (nlt)
		  {
		    if (same_type_p (TREE_TYPE (t), nlt))
		      warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type has no linkage",
			       type, t);
		    else
		      warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type depends on the type %qT which has no linkage",
			       type, t, nlt);
		  }
		else
		  warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type uses the anonymous namespace",
			   type, t);
	      }
	  }
	else if (MAYBE_CLASS_TYPE_P (ftype)
		 && vis < VISIBILITY_HIDDEN
		 && subvis >= VISIBILITY_HIDDEN)
	  warning (OPT_Wattributes, "\
%qT declared with greater visibility than the type of its field %qD",
		   type, t);
      }

  /* Likewise check each direct base class.  */
  binfo = TYPE_BINFO (type);
  for (i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
    {
      int subvis = type_visibility (TREE_TYPE (t));

      if (subvis == VISIBILITY_ANON)
	{
	  if (!in_main_input_context())
	    {
	      tree nlt = no_linkage_check (TREE_TYPE (t), /*relaxed_p=*/false);
	      if (nlt)
		{
		  if (same_type_p (TREE_TYPE (t), nlt))
		    warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type has no linkage",
			     type, TREE_TYPE (t));
		  else
		    warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type depends on the type %qT which has no linkage",
			     type, TREE_TYPE (t), nlt);
		}
	      else
		warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type uses the anonymous namespace",
			 type, TREE_TYPE (t));
	    }
	}
      else if (vis < VISIBILITY_HIDDEN
	       && subvis >= VISIBILITY_HIDDEN)
	warning (OPT_Wattributes, "\
%qT declared with greater visibility than its base %qT",
		 type, TREE_TYPE (t));
    }
}
/* Functions for adjusting the visibility of a tagged type and its nested
types and declarations when it gets a name for linkage purposes from a
typedef. */
static void bt_reset_linkage_1 (binding_entry, void *);
static void bt_reset_linkage_2 (binding_entry, void *);
/* First reset the visibility of all the types.  */

static void
reset_type_linkage_1 (tree type)
{
  set_linkage_according_to_type (type, TYPE_MAIN_DECL (type));
  if (!CLASS_TYPE_P (type))
    return;
  /* Recurse into nested user-defined types.  */
  binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
			 bt_reset_linkage_1, NULL);
}
/* binding_table_foreach callback: reset linkage of a nested type.  */

static void
bt_reset_linkage_1 (binding_entry entry, void */*data*/)
{
  reset_type_linkage_1 (entry->type);
}
/* Then reset the visibility of any static data members or member
   functions that use those types.  */

static void
reset_decl_linkage (tree decl)
{
  /* Already public, or a clone whose linkage follows its origin:
     nothing to do.  */
  if (TREE_PUBLIC (decl) || DECL_CLONED_FUNCTION_P (decl))
    return;

  TREE_PUBLIC (decl) = true;
  DECL_INTERFACE_KNOWN (decl) = false;
  determine_visibility (decl);
  tentative_decl_linkage (decl);
}
/* Second pass: re-mangle and reset the linkage of the vtable, typeinfo
   variable, and members of TYPE, now that the type has a linkage name.  */

static void
reset_type_linkage_2 (tree type)
{
  if (CLASS_TYPE_P (type))
    {
      if (tree vt = CLASSTYPE_VTABLES (type))
	{
	  /* The vtable's mangled name depends on the type's name, so
	     recompute it before resetting linkage.  */
	  tree name = mangle_vtbl_for_type (type);
	  DECL_NAME (vt) = name;
	  SET_DECL_ASSEMBLER_NAME (vt, name);
	  reset_decl_linkage (vt);
	}
      if (tree ti = CLASSTYPE_TYPEINFO_VAR (type))
	{
	  /* Likewise for the typeinfo variable.  */
	  tree name = mangle_typeinfo_for_type (type);
	  DECL_NAME (ti) = name;
	  SET_DECL_ASSEMBLER_NAME (ti, name);
	  TREE_TYPE (name) = type;
	  reset_decl_linkage (ti);
	}
      for (tree m = TYPE_FIELDS (type); m; m = DECL_CHAIN (m))
	{
	  tree mem = STRIP_TEMPLATE (m);
	  if (TREE_CODE (mem) == VAR_DECL || TREE_CODE (mem) == FUNCTION_DECL)
	    reset_decl_linkage (mem);
	}
      /* Recurse into nested user-defined types.  */
      binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
			     bt_reset_linkage_2, NULL);
    }
}
/* binding_table_foreach callback: second-pass reset for a nested type.  */

static void
bt_reset_linkage_2 (binding_entry entry, void */*data*/)
{
  reset_type_linkage_2 (entry->type);
}
/* Reset the linkage of TYPE and everything that depends on it, in two
   passes: first the types themselves, then the decls (vtables, typeinfo,
   members) whose mangled names depend on those types.  */

void
reset_type_linkage (tree type)
{
  reset_type_linkage_1 (type);
  reset_type_linkage_2 (type);
}
/* Set up our initial idea of what the linkage of DECL should be.  */

void
tentative_decl_linkage (tree decl)
{
  if (DECL_INTERFACE_KNOWN (decl))
    /* We've already made a decision as to how this function will
       be handled.  */;
  else if (vague_linkage_p (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL
	  && decl_defined_p (decl))
	{
	  /* Tentatively mark it external-but-defined-here; the final
	     decision is deferred (see import_export_decl).  */
	  DECL_EXTERNAL (decl) = 1;
	  DECL_NOT_REALLY_EXTERN (decl) = 1;
	  note_vague_linkage_fn (decl);
	  /* A non-template inline function with external linkage will
	     always be COMDAT.  As we must eventually determine the
	     linkage of all functions, and as that causes writes to
	     the data mapped in from the PCH file, it's advantageous
	     to mark the functions at this point.  */
	  if (DECL_DECLARED_INLINE_P (decl)
	      && (!DECL_IMPLICIT_INSTANTIATION (decl)
		  || DECL_DEFAULTED_FN (decl)))
	    {
	      /* This function must have external linkage, as
		 otherwise DECL_INTERFACE_KNOWN would have been
		 set.  */
	      gcc_assert (TREE_PUBLIC (decl));
	      comdat_linkage (decl);
	      DECL_INTERFACE_KNOWN (decl) = 1;
	    }
	}
      else if (VAR_P (decl))
	maybe_commonize_var (decl);
    }
}
/* DECL is a FUNCTION_DECL or VAR_DECL. If the object file linkage
for DECL has not already been determined, do so now by setting
DECL_EXTERNAL, DECL_COMDAT and other related flags. Until this
function is called entities with vague linkage whose definitions
are available must have TREE_PUBLIC set.
If this function decides to place DECL in COMDAT, it will set
appropriate flags -- but will not clear DECL_EXTERNAL. It is up to
the caller to decide whether or not to clear DECL_EXTERNAL. Some
callers defer that decision until it is clear that DECL is actually
required. */
void
import_export_decl (tree decl)
{
bool comdat_p;
bool import_p;
tree class_type = NULL_TREE;
if (DECL_INTERFACE_KNOWN (decl))
return;
/* We cannot determine what linkage to give to an entity with vague
linkage until the end of the file. For example, a virtual table
for a class will be defined if and only if the key method is
defined in this translation unit. */
gcc_assert (at_eof);
/* Object file linkage for explicit instantiations is handled in
mark_decl_instantiated. For static variables in functions with
vague linkage, maybe_commonize_var is used.
Therefore, the only declarations that should be provided to this
function are those with external linkage that are:
* implicit instantiations of function templates
* inline function
* implicit instantiations of static data members of class
templates
* virtual tables
* typeinfo objects
Furthermore, all entities that reach this point must have a
definition available in this translation unit.
The following assertions check these conditions. */
gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
/* Any code that creates entities with TREE_PUBLIC cleared should
also set DECL_INTERFACE_KNOWN. */
gcc_assert (TREE_PUBLIC (decl));
if (TREE_CODE (decl) == FUNCTION_DECL)
gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
|| DECL_DECLARED_INLINE_P (decl));
else
gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
|| DECL_VTABLE_OR_VTT_P (decl)
|| DECL_TINFO_P (decl));
/* Check that a definition of DECL is available in this translation
unit. */
gcc_assert (!DECL_REALLY_EXTERN (decl));
/* Assume that DECL will not have COMDAT linkage. */
comdat_p = false;
/* Assume that DECL will not be imported into this translation
unit. */
import_p = false;
if (VAR_P (decl) && DECL_VTABLE_OR_VTT_P (decl))
{
class_type = DECL_CONTEXT (decl);
import_export_class (class_type);
if (CLASSTYPE_INTERFACE_KNOWN (class_type)
&& CLASSTYPE_INTERFACE_ONLY (class_type))
import_p = true;
else if ((!flag_weak || TARGET_WEAK_NOT_IN_ARCHIVE_TOC)
&& !CLASSTYPE_USE_TEMPLATE (class_type)
&& CLASSTYPE_KEY_METHOD (class_type)
&& !DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type)))
/* The ABI requires that all virtual tables be emitted with
COMDAT linkage. However, on systems where COMDAT symbols
don't show up in the table of contents for a static
archive, or on systems without weak symbols (where we
approximate COMDAT linkage by using internal linkage), the
linker will report errors about undefined symbols because
it will not see the virtual table definition. Therefore,
in the case that we know that the virtual table will be
emitted in only one translation unit, we make the virtual
table an ordinary definition with external linkage. */
DECL_EXTERNAL (decl) = 0;
else if (CLASSTYPE_INTERFACE_KNOWN (class_type))
{
/* CLASS_TYPE is being exported from this translation unit,
so DECL should be defined here. */
if (!flag_weak && CLASSTYPE_EXPLICIT_INSTANTIATION (class_type))
/* If a class is declared in a header with the "extern
template" extension, then it will not be instantiated,
even in translation units that would normally require
it. Often such classes are explicitly instantiated in
one translation unit. Therefore, the explicit
instantiation must be made visible to other translation
units. */
DECL_EXTERNAL (decl) = 0;
else
{
/* The generic C++ ABI says that class data is always
COMDAT, even if there is a key function. Some
variants (e.g., the ARM EABI) says that class data
only has COMDAT linkage if the class data might be
emitted in more than one translation unit. When the
key method can be inline and is inline, we still have
to arrange for comdat even though
class_data_always_comdat is false. */
if (!CLASSTYPE_KEY_METHOD (class_type)
|| DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type))
|| targetm.cxx.class_data_always_comdat ())
{
/* The ABI requires COMDAT linkage. Normally, we
only emit COMDAT things when they are needed;
make sure that we realize that this entity is
indeed needed. */
comdat_p = true;
mark_needed (decl);
}
}
}
else if (!flag_implicit_templates
&& CLASSTYPE_IMPLICIT_INSTANTIATION (class_type))
import_p = true;
else
comdat_p = true;
}
else if (VAR_P (decl) && DECL_TINFO_P (decl))
{
tree type = TREE_TYPE (DECL_NAME (decl));
if (CLASS_TYPE_P (type))
{
class_type = type;
import_export_class (type);
if (CLASSTYPE_INTERFACE_KNOWN (type)
&& TYPE_POLYMORPHIC_P (type)
&& CLASSTYPE_INTERFACE_ONLY (type)
/* If -fno-rtti was specified, then we cannot be sure
that RTTI information will be emitted with the
virtual table of the class, so we must emit it
wherever it is used. */
&& flag_rtti)
import_p = true;
else
{
if (CLASSTYPE_INTERFACE_KNOWN (type)
&& !CLASSTYPE_INTERFACE_ONLY (type))
{
comdat_p = (targetm.cxx.class_data_always_comdat ()
|| (CLASSTYPE_KEY_METHOD (type)
&& DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (type))));
mark_needed (decl);
if (!flag_weak)
{
comdat_p = false;
DECL_EXTERNAL (decl) = 0;
}
}
else
comdat_p = true;
}
}
else
comdat_p = true;
}
else if (DECL_TEMPLOID_INSTANTIATION (decl))
{
/* DECL is an implicit instantiation of a function or static
data member. */
if (flag_implicit_templates
|| (flag_implicit_inline_templates
&& TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DECLARED_INLINE_P (decl)))
comdat_p = true;
else
/* If we are not implicitly generating templates, then mark
this entity as undefined in this translation unit. */
import_p = true;
}
else if (DECL_FUNCTION_MEMBER_P (decl))
{
if (!DECL_DECLARED_INLINE_P (decl))
{
tree ctype = DECL_CONTEXT (decl);
import_export_class (ctype);
if (CLASSTYPE_INTERFACE_KNOWN (ctype))
{
DECL_NOT_REALLY_EXTERN (decl)
= ! (CLASSTYPE_INTERFACE_ONLY (ctype)
|| (DECL_DECLARED_INLINE_P (decl)
&& ! flag_implement_inlines
&& !DECL_VINDEX (decl)));
if (!DECL_NOT_REALLY_EXTERN (decl))
DECL_EXTERNAL (decl) = 1;
/* Always make artificials weak. */
if (DECL_ARTIFICIAL (decl) && flag_weak)
comdat_p = true;
else
maybe_make_one_only (decl);
}
}
else
comdat_p = true;
}
else
comdat_p = true;
if (import_p)
{
/* If we are importing DECL into this translation unit, mark is
an undefined here. */
DECL_EXTERNAL (decl) = 1;
DECL_NOT_REALLY_EXTERN (decl) = 0;
}
else if (comdat_p)
{
/* If we decided to put DECL in COMDAT, mark it accordingly at
this point. */
comdat_linkage (decl);
}
DECL_INTERFACE_KNOWN (decl) = 1;
}
/* Return an expression that performs the destruction of DECL, which
   must be a VAR_DECL whose type has a non-trivial destructor, or is
   an array whose (innermost) elements have a non-trivial destructor.  */

tree
build_cleanup (tree decl)
{
  tree cleanup = cxx_maybe_build_cleanup (decl, tf_warning_or_error);
  /* The caller guarantees a non-trivial destructor exists, so a
     cleanup expression must have been produced.  */
  gcc_assert (cleanup != NULL_TREE);
  return cleanup;
}
/* GUARD is a helper variable for DECL; make them have the same linkage and
   visibility.  */

void
copy_linkage (tree guard, tree decl)
{
  /* Mirror the basic linkage flags of DECL onto GUARD.  */
  TREE_PUBLIC (guard) = TREE_PUBLIC (decl);
  TREE_STATIC (guard) = TREE_STATIC (decl);
  DECL_COMMON (guard) = DECL_COMMON (decl);
  DECL_COMDAT (guard) = DECL_COMDAT (decl);
  if (TREE_STATIC (guard))
    {
      /* The guard lives and dies with DECL, so it must share DECL's
	 thread-local storage model.  */
      CP_DECL_THREAD_LOCAL_P (guard) = CP_DECL_THREAD_LOCAL_P (decl);
      set_decl_tls_model (guard, DECL_TLS_MODEL (decl));
      /* A one-only guard gets its own comdat group, derived from the
	 guard's mangled name rather than DECL's.  */
      if (DECL_ONE_ONLY (decl))
	make_decl_one_only (guard, cxx_comdat_group (guard));
      if (TREE_PUBLIC (decl))
	DECL_WEAK (guard) = DECL_WEAK (decl);
      /* Also check vague_linkage_p, as DECL_WEAK and DECL_ONE_ONLY might not
	 be set until import_export_decl at EOF.  */
      if (vague_linkage_p (decl))
	comdat_linkage (guard);
      DECL_VISIBILITY (guard) = DECL_VISIBILITY (decl);
      DECL_VISIBILITY_SPECIFIED (guard) = DECL_VISIBILITY_SPECIFIED (decl);
    }
}
/* Returns the initialization guard variable for the variable DECL,
   which has static storage duration.  */

tree
get_guard (tree decl)
{
  tree name = mangle_guard_variable (decl);
  tree guard = get_global_binding (name);

  /* Reuse the guard if one was already created for DECL.  */
  if (guard)
    return guard;

  /* We use a type that is big enough to contain a mutex as well
     as an integer counter.  */
  tree guard_type = targetm.cxx.guard_type ();
  guard = build_decl (DECL_SOURCE_LOCATION (decl),
		      VAR_DECL, name, guard_type);

  /* The guard should have the same linkage as what it guards.  */
  copy_linkage (guard, decl);

  DECL_ARTIFICIAL (guard) = 1;
  DECL_IGNORED_P (guard) = 1;
  TREE_USED (guard) = 1;
  pushdecl_top_level_and_finish (guard, NULL_TREE);
  return guard;
}
/* Return an atomic load of src with the appropriate memory model.  */

static tree
build_atomic_load_byte (tree src, HOST_WIDE_INT model)
{
  tree mem_model = build_int_cst (integer_type_node, model);
  /* Select the __atomic_load_N builtin matching the size of a char
     (i.e. __atomic_load_1 for a one-byte load).  */
  unsigned int nbytes = tree_to_uhwi (TYPE_SIZE_UNIT (char_type_node));
  int fncode = BUILT_IN_ATOMIC_LOAD_N + exact_log2 (nbytes) + 1;
  tree fndecl = builtin_decl_implicit ((enum built_in_function) fncode);

  tree byte_ptr_type = build_pointer_type (char_type_node);
  tree addr = build1 (ADDR_EXPR, byte_ptr_type, src);
  return build_call_expr (fndecl, 2, addr, mem_model);
}
/* Return those bits of the GUARD variable that should be set when the
   guarded entity is actually initialized.  */

static tree
get_guard_bits (tree guard)
{
  /* If the target tests a bit of the whole guard word, use the guard
     object unchanged.  */
  if (targetm.cxx.guard_mask_bit ())
    return guard;

  /* Otherwise we only set the first byte of the guard, in order to
     leave room for a mutex in the high-order bits.  */
  tree guard_addr = build1 (ADDR_EXPR,
			    build_pointer_type (TREE_TYPE (guard)),
			    guard);
  tree byte_addr = build1 (NOP_EXPR,
			   build_pointer_type (char_type_node),
			   guard_addr);
  return build1 (INDIRECT_REF, char_type_node, byte_addr);
}
/* Return an expression which determines whether or not the GUARD
   variable has already been initialized.  */

tree
get_guard_cond (tree guard, bool thread_safe)
{
  tree guard_value;

  if (!thread_safe)
    /* Without thread safety, just read the relevant guard bits
       directly.  */
    guard = get_guard_bits (guard);
  else
    /* For thread-safe statics, read the first byte of the guard with
       acquire semantics.  */
    guard = build_atomic_load_byte (guard, MEMMODEL_ACQUIRE);

  /* Mask off all but the low bit.  */
  if (targetm.cxx.guard_mask_bit ())
    {
      guard_value = integer_one_node;
      if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
	guard_value = fold_convert (TREE_TYPE (guard), guard_value);
      guard = cp_build_binary_op (input_location,
				  BIT_AND_EXPR, guard, guard_value,
				  tf_warning_or_error);
    }

  guard_value = integer_zero_node;
  if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
    guard_value = fold_convert (TREE_TYPE (guard), guard_value);
  /* Initialization is still needed iff the (masked) guard is zero.  */
  return cp_build_binary_op (input_location,
			     EQ_EXPR, guard, guard_value,
			     tf_warning_or_error);
}
/* Return an expression which sets the GUARD variable, indicating that
   the variable being guarded has been initialized.  */

tree
set_guard (tree guard)
{
  /* Only the relevant bits/byte of the guard are written.  */
  guard = get_guard_bits (guard);

  /* Store a one (converted to the guard's type if necessary).  */
  tree one = integer_one_node;
  if (!same_type_p (TREE_TYPE (one), TREE_TYPE (guard)))
    one = fold_convert (TREE_TYPE (guard), one);
  return cp_build_modify_expr (input_location, guard, NOP_EXPR, one,
			       tf_warning_or_error);
}
/* Returns true iff we can tell that VAR does not have a dynamic
   initializer.  */

static bool
var_defined_without_dynamic_init (tree var)
{
  /* If it's defined in another TU, we can't tell.  If it has a
     non-trivial destructor, registering the destructor counts as
     dynamic initialization.  And if it's a case of self-initialization,
     DECL_INITIALIZED_P is false while the initializer is handled by
     finish_id_expression, so we can't tell either.  */
  if (DECL_EXTERNAL (var)
      || TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (var))
      || !DECL_INITIALIZED_P (var))
    return false;

  /* If it has no initializer or a constant one, it's not dynamic.  */
  return (!DECL_NONTRIVIALLY_INITIALIZED_P (var)
	  || DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (var));
}
/* Returns true iff VAR is a variable that needs uses to be
   wrapped for possible dynamic initialization.  */

static bool
var_needs_tls_wrapper (tree var)
{
  if (error_operand_p (var))
    return false;
  /* Only C++11 thread_local (not GNU __thread) variables qualify.  */
  if (!CP_DECL_THREAD_LOCAL_P (var) || DECL_GNU_TLS_P (var))
    return false;
  /* Function-local statics are handled by local guard logic.  */
  if (DECL_FUNCTION_SCOPE_P (var))
    return false;
  return !var_defined_without_dynamic_init (var);
}
/* Get the FUNCTION_DECL for the shared TLS init function for this
   translation unit.  */

static tree
get_local_tls_init_fn (location_t loc)
{
  tree name = get_identifier ("__tls_init");
  tree fn = get_global_binding (name);

  /* Return the cached decl if it was already built.  */
  if (fn)
    return fn;

  fn = build_lang_decl_loc (loc, FUNCTION_DECL, name,
			    build_function_type (void_type_node,
						 void_list_node));
  /* Give it C linkage semantics and keep it TU-local.  */
  SET_DECL_LANGUAGE (fn, lang_c);
  TREE_PUBLIC (fn) = false;
  DECL_ARTIFICIAL (fn) = true;
  mark_used (fn);
  set_global_binding (fn);
  return fn;
}
/* Get a FUNCTION_DECL for the init function for the thread_local
   variable VAR.  The init function will be an alias to the function
   that initializes all the non-local TLS variables in the translation
   unit.  The init function is only used by the wrapper function.
   Returns NULL_TREE when no init function is needed.  */

static tree
get_tls_init_fn (tree var)
{
  /* Only C++11 TLS vars need this init fn.  */
  if (!var_needs_tls_wrapper (var))
    return NULL_TREE;

  /* If -fno-extern-tls-init, assume that we don't need to call
     a tls init function for a variable defined in another TU.  */
  if (!flag_extern_tls_init && DECL_EXTERNAL (var))
    return NULL_TREE;

  /* If the variable is internal, or if we can't generate aliases,
     call the local init function directly.  */
  if (!TREE_PUBLIC (var) || !TARGET_SUPPORTS_ALIASES)
    return get_local_tls_init_fn (DECL_SOURCE_LOCATION (var));

  tree sname = mangle_tls_init_fn (var);
  tree fn = get_global_binding (sname);
  if (!fn)
    {
      fn = build_lang_decl (FUNCTION_DECL, sname,
			    build_function_type (void_type_node,
						 void_list_node));
      SET_DECL_LANGUAGE (fn, lang_c);
      /* The init fn tracks the linkage of the variable it serves.  */
      TREE_PUBLIC (fn) = TREE_PUBLIC (var);
      DECL_ARTIFICIAL (fn) = true;
      DECL_COMDAT (fn) = DECL_COMDAT (var);
      DECL_EXTERNAL (fn) = DECL_EXTERNAL (var);
      if (DECL_ONE_ONLY (var))
	make_decl_one_only (fn, cxx_comdat_group (fn));
      if (TREE_PUBLIC (var))
	{
	  tree obtype = strip_array_types (non_reference (TREE_TYPE (var)));
	  /* If the variable is defined somewhere else and might have static
	     initialization, make the init function a weak reference.  */
	  if ((!TYPE_NEEDS_CONSTRUCTING (obtype)
	       || TYPE_HAS_CONSTEXPR_CTOR (obtype)
	       || TYPE_HAS_TRIVIAL_DFLT (obtype))
	      && TYPE_HAS_TRIVIAL_DESTRUCTOR (obtype)
	      && DECL_EXTERNAL (var))
	    declare_weak (fn);
	  else
	    DECL_WEAK (fn) = DECL_WEAK (var);
	}
      DECL_VISIBILITY (fn) = DECL_VISIBILITY (var);
      DECL_VISIBILITY_SPECIFIED (fn) = DECL_VISIBILITY_SPECIFIED (var);
      DECL_DLLIMPORT_P (fn) = DECL_DLLIMPORT_P (var);
      DECL_IGNORED_P (fn) = 1;
      mark_used (fn);

      /* Link FN back to VAR; generate_tls_wrapper reads this field to
	 find the variable the init function serves.  */
      DECL_BEFRIENDING_CLASSES (fn) = var;

      set_global_binding (fn);
    }
  return fn;
}
/* Get a FUNCTION_DECL for the init wrapper function for the thread_local
   variable VAR.  The wrapper function calls the init function (if any) for
   VAR and then returns a reference to VAR.  The wrapper function is used
   in place of VAR everywhere VAR is mentioned.  Returns NULL_TREE when
   VAR needs no wrapper.  */

static tree
get_tls_wrapper_fn (tree var)
{
  /* Only C++11 TLS vars need this wrapper fn.  */
  if (!var_needs_tls_wrapper (var))
    return NULL_TREE;

  tree sname = mangle_tls_wrapper_fn (var);
  tree fn = get_global_binding (sname);
  if (!fn)
    {
      /* A named rvalue reference is an lvalue, so the wrapper should
	 always return an lvalue reference.  */
      tree type = non_reference (TREE_TYPE (var));
      type = build_reference_type (type);
      tree fntype = build_function_type (type, void_list_node);

      fn = build_lang_decl_loc (DECL_SOURCE_LOCATION (var),
				FUNCTION_DECL, sname, fntype);
      SET_DECL_LANGUAGE (fn, lang_c);
      TREE_PUBLIC (fn) = TREE_PUBLIC (var);
      DECL_ARTIFICIAL (fn) = true;
      DECL_IGNORED_P (fn) = 1;
      /* The wrapper is inline and emitted everywhere var is used.  */
      DECL_DECLARED_INLINE_P (fn) = true;
      if (TREE_PUBLIC (var))
	{
	  comdat_linkage (fn);
#ifdef HAVE_GAS_HIDDEN
	  /* Make the wrapper bind locally; there's no reason to share
	     the wrapper between multiple shared objects.  */
	  DECL_VISIBILITY (fn) = VISIBILITY_INTERNAL;
	  DECL_VISIBILITY_SPECIFIED (fn) = true;
#endif
	}
      if (!TREE_PUBLIC (fn))
	DECL_INTERFACE_KNOWN (fn) = true;
      mark_used (fn);
      note_vague_linkage_fn (fn);

#if 0
      /* We want CSE to commonize calls to the wrapper, but marking it as
	 pure is unsafe since it has side-effects.  I guess we need a new
	 ECF flag even weaker than ECF_PURE.  FIXME!  */
      DECL_PURE_P (fn) = true;
#endif

      /* Link FN back to VAR; generate_tls_wrapper reads this field to
	 find the variable being wrapped.  */
      DECL_BEFRIENDING_CLASSES (fn) = var;

      set_global_binding (fn);
    }
  return fn;
}
/* If EXPR is a thread_local variable that should be wrapped by init
   wrapper function, return a call to that function, otherwise return
   NULL.  */

tree
maybe_get_tls_wrapper_call (tree expr)
{
  /* Wrapping only applies to non-template, evaluated uses of
     thread_local variables.  */
  if (!VAR_P (expr)
      || processing_template_decl
      || cp_unevaluated_operand
      || !CP_DECL_THREAD_LOCAL_P (expr))
    return NULL;

  tree wrapper = get_tls_wrapper_fn (expr);
  if (!wrapper)
    return NULL;
  return build_cxx_call (wrapper, 0, NULL, tf_warning_or_error);
}
/* At EOF, generate the definition for the TLS wrapper function FN:

     T& var_wrapper() {
       if (init_fn) init_fn();
       return var;
     }  */

static void
generate_tls_wrapper (tree fn)
{
  /* The wrapped variable was stashed here by get_tls_wrapper_fn.  */
  tree var = DECL_BEFRIENDING_CLASSES (fn);

  start_preparsed_function (fn, NULL_TREE, SF_DEFAULT | SF_PRE_PARSED);
  tree body = begin_function_body ();
  /* Only call the init fn if there might be one.  */
  if (tree init_fn = get_tls_init_fn (var))
    {
      tree if_stmt = NULL_TREE;
      /* If init_fn is a weakref, make sure it exists before calling.  */
      if (lookup_attribute ("weak", DECL_ATTRIBUTES (init_fn)))
	{
	  if_stmt = begin_if_stmt ();
	  tree addr = cp_build_addr_expr (init_fn, tf_warning_or_error);
	  tree cond = cp_build_binary_op (DECL_SOURCE_LOCATION (var),
					  NE_EXPR, addr, nullptr_node,
					  tf_warning_or_error);
	  finish_if_stmt_cond (cond, if_stmt);
	}
      finish_expr_stmt (build_cxx_call
			(init_fn, 0, NULL, tf_warning_or_error));
      if (if_stmt)
	{
	  finish_then_clause (if_stmt);
	  finish_if_stmt (if_stmt);
	}
    }
  else
    /* If there's no initialization, the wrapper is a constant function.  */
    TREE_READONLY (fn) = true;
  finish_return_stmt (convert_from_reference (var));
  finish_function_body (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}
/* Start the process of running a particular set of global constructors
   or destructors.  Subroutine of do_[cd]tors.  Also called from
   vtv_start_verification_constructor_init_function.

   METHOD_TYPE is 'I' (initialization) or 'D' (destruction); INITP is
   the priority level.  Returns the compound statement forming the body
   of the newly started function.  */

static tree
start_objects (int method_type, int initp)
{
  tree body;
  tree fndecl;
  /* Large enough for "sub_" + method char + joiner + any unsigned int
     (up to 10 digits) + NUL.  The previous 14-byte buffer relied on
     priorities never exceeding five digits; use snprintf so a larger
     value can never overrun the buffer.  */
  char type[20];

  /* Make ctor or dtor function.  METHOD_TYPE may be 'I' or 'D'.  */
  if (initp != DEFAULT_INIT_PRIORITY)
    {
      char joiner;

#ifdef JOINER
      joiner = JOINER;
#else
      joiner = '_';
#endif

      snprintf (type, sizeof type, "sub_%c%c%.5u",
		method_type, joiner, initp);
    }
  else
    snprintf (type, sizeof type, "sub_%c", method_type);

  fndecl = build_lang_decl (FUNCTION_DECL,
			    get_file_function_name (type),
			    build_function_type_list (void_type_node,
						      NULL_TREE));
  start_preparsed_function (fndecl, /*attrs=*/NULL_TREE, SF_PRE_PARSED);

  TREE_PUBLIC (current_function_decl) = 0;

  /* Mark as artificial because it's not explicitly in the user's
     source code.  */
  DECL_ARTIFICIAL (current_function_decl) = 1;

  /* Mark this declaration as used to avoid spurious warnings.  */
  TREE_USED (current_function_decl) = 1;

  /* Mark this function as a global constructor or destructor.  */
  if (method_type == 'I')
    DECL_GLOBAL_CTOR_P (current_function_decl) = 1;
  else
    DECL_GLOBAL_DTOR_P (current_function_decl) = 1;

  body = begin_compound_stmt (BCS_FN_BODY);

  return body;
}
/* Finish the process of running a particular set of global constructors
   or destructors.  Subroutine of do_[cd]tors.  */

static void
finish_objects (int method_type, int initp, tree body)
{
  /* Close the compound statement and the function itself.  */
  finish_compound_stmt (body);
  tree fn = finish_function (/*inline_p=*/false);

  /* Record the function as a static ctor or dtor at priority INITP.  */
  if (method_type == 'I')
    {
      DECL_STATIC_CONSTRUCTOR (fn) = 1;
      decl_init_priority_insert (fn, initp);
    }
  else
    {
      DECL_STATIC_DESTRUCTOR (fn) = 1;
      decl_fini_priority_insert (fn, initp);
    }

  expand_or_defer_fn (fn);
}
/* The names of the parameters to the function created to handle
   initializations and destructions for objects with static storage
   duration.  */
#define INITIALIZE_P_IDENTIFIER "__initialize_p"
#define PRIORITY_IDENTIFIER "__priority"

/* The name of the function we create to handle initializations and
   destructions for objects with static storage duration.  */
#define SSDF_IDENTIFIER "__static_initialization_and_destruction"

/* The declaration for the __INITIALIZE_P argument.  */
static GTY(()) tree initialize_p_decl;

/* The declaration for the __PRIORITY argument.  */
static GTY(()) tree priority_decl;

/* The declaration for the static storage duration function.  */
static GTY(()) tree ssdf_decl;

/* All the static storage duration functions created in this
   translation unit.  GTY(()) marks these as garbage-collector roots.  */
static GTY(()) vec<tree, va_gc> *ssdf_decls;

/* A map from priority levels to information about that priority
   level.  There may be many such levels, so efficient lookup is
   important.  */
static splay_tree priority_info_map;
/* Begins the generation of the function that will handle all
   initialization and destruction of objects with static storage
   duration.  The function generated takes two parameters of type
   `int': __INITIALIZE_P and __PRIORITY.  If __INITIALIZE_P is
   nonzero, it performs initializations.  Otherwise, it performs
   destructions.  It only performs those initializations or
   destructions with the indicated __PRIORITY.  The generated function
   returns no value.

   It is assumed that this function will only be called once per
   translation unit.  */

static tree
start_static_storage_duration_function (unsigned count)
{
  tree type;
  tree body;
  char id[sizeof (SSDF_IDENTIFIER) + 1 /* '\0' */ + 32];

  /* Create the identifier for this function.  It will be of the form
     SSDF_IDENTIFIER_<number>.  */
  sprintf (id, "%s_%u", SSDF_IDENTIFIER, count);

  type = build_function_type_list (void_type_node,
				   integer_type_node, integer_type_node,
				   NULL_TREE);

  /* Create the FUNCTION_DECL itself.  */
  ssdf_decl = build_lang_decl (FUNCTION_DECL,
			       get_identifier (id),
			       type);
  TREE_PUBLIC (ssdf_decl) = 0;
  DECL_ARTIFICIAL (ssdf_decl) = 1;

  /* Put this function in the list of functions to be called from the
     static constructors and destructors.  */
  if (!ssdf_decls)
    {
      vec_alloc (ssdf_decls, 32);

      /* Take this opportunity to initialize the map from priority
	 numbers to information about that priority level.  */
      priority_info_map = splay_tree_new (splay_tree_compare_ints,
					  /*delete_key_fn=*/0,
					  /*delete_value_fn=*/
					  splay_tree_delete_pointers);

      /* We always need to generate functions for the
	 DEFAULT_INIT_PRIORITY so enter it now.  That way when we walk
	 priorities later, we'll be sure to find the
	 DEFAULT_INIT_PRIORITY.  */
      get_priority_info (DEFAULT_INIT_PRIORITY);
    }

  vec_safe_push (ssdf_decls, ssdf_decl);

  /* Create the argument list.  */
  initialize_p_decl = cp_build_parm_decl
    (ssdf_decl, get_identifier (INITIALIZE_P_IDENTIFIER), integer_type_node);
  TREE_USED (initialize_p_decl) = 1;
  priority_decl = cp_build_parm_decl
    (ssdf_decl, get_identifier (PRIORITY_IDENTIFIER), integer_type_node);
  TREE_USED (priority_decl) = 1;
  DECL_CHAIN (initialize_p_decl) = priority_decl;
  DECL_ARGUMENTS (ssdf_decl) = initialize_p_decl;

  /* Put the function in the global scope.  */
  pushdecl (ssdf_decl);

  /* Start the function itself.  This is equivalent to declaring the
     function as:

       static void __ssdf (int __initialize_p, int __priority_p);

     It is static because we only need to call this function from the
     various constructor and destructor functions for this module.  */
  start_preparsed_function (ssdf_decl,
			    /*attrs=*/NULL_TREE,
			    SF_PRE_PARSED);

  /* Set up the scope of the outermost block in the function.  */
  body = begin_compound_stmt (BCS_FN_BODY);

  return body;
}
/* Finish the generation of the function which performs initialization
   and destruction of objects with static storage duration.  After
   this point, no more such objects can be created.  */

static void
finish_static_storage_duration_function (tree body)
{
  /* Close the outermost block, finish the function, and hand it to
     the back end.  */
  finish_compound_stmt (body);
  tree fn = finish_function (/*inline_p=*/false);
  expand_or_defer_fn (fn);
}
/* Return the information about the indicated PRIORITY level.  If no
   code to handle this level has yet been generated, generate the
   appropriate prologue.  */

static priority_info
get_priority_info (int priority)
{
  splay_tree_node node
    = splay_tree_lookup (priority_info_map, (splay_tree_key) priority);
  if (node)
    return (priority_info) node->value;

  /* First time this priority is seen: create a fresh information
     structure and insert it into the map.  */
  priority_info pi = XNEW (struct priority_info_s);
  pi->initializations_p = 0;
  pi->destructions_p = 0;
  splay_tree_insert (priority_info_map,
		     (splay_tree_key) priority,
		     (splay_tree_value) pi);
  return pi;
}
/* The effective initialization priority of a DECL.  A DECL with no
   explicit priority, or an explicit priority of zero, falls back to
   DEFAULT_INIT_PRIORITY.  */
#define DECL_EFFECTIVE_INIT_PRIORITY(decl) \
	((!DECL_HAS_INIT_PRIORITY_P (decl) || DECL_INIT_PRIORITY (decl) == 0) \
	 ? DEFAULT_INIT_PRIORITY : DECL_INIT_PRIORITY (decl))

/* Whether a DECL needs a guard to protect it against multiple
   initialization.  Only externally visible decls that may be emitted
   in several translation units (common, one-only, or weak) qualify.  */
#define NEEDS_GUARD_P(decl) (TREE_PUBLIC (decl) && (DECL_COMMON (decl) \
						    || DECL_ONE_ONLY (decl) \
						    || DECL_WEAK (decl)))
/* Called from one_static_initialization_or_destruction(),
   via walk_tree.
   Walks the initializer list of a global variable and looks for
   temporary variables (DECL_NAME() == NULL and DECL_ARTIFICIAL != 0)
   that have their DECL_CONTEXT() == NULL.
   For each such temporary variable, set their DECL_CONTEXT() to
   the current function.  This is necessary because otherwise
   some optimizers (enabled by -O2 -fprofile-arcs) might crash
   when trying to refer to a temporary variable that does not have
   its DECL_CONTEXT() properly set.  */

static tree
fix_temporary_vars_context_r (tree *node,
			      int * /*unused*/,
			      void * /*unused1*/)
{
  gcc_assert (current_function_decl);

  /* Only BIND_EXPRs carry the variable lists we need to patch.  */
  if (TREE_CODE (*node) != BIND_EXPR)
    return NULL_TREE;

  for (tree var = BIND_EXPR_VARS (*node); var; var = DECL_CHAIN (var))
    if (VAR_P (var)
	&& !DECL_NAME (var)
	&& DECL_ARTIFICIAL (var)
	&& !DECL_CONTEXT (var))
      DECL_CONTEXT (var) = current_function_decl;

  return NULL_TREE;
}
/* Set up to handle the initialization or destruction of DECL.  If
   INITP is nonzero, we are initializing the variable.  Otherwise, we
   are destroying it.  INIT is the (possibly NULL) initialization
   expression for DECL.  Emits the statements into the function
   currently being built (the SSDF).  */

static void
one_static_initialization_or_destruction (tree decl, tree init, bool initp)
{
  tree guard_if_stmt = NULL_TREE;
  tree guard;

  /* If we are supposed to destruct and there's a trivial destructor,
     nothing has to be done.  */
  if (!initp
      && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
    return;

  /* Trick the compiler into thinking we are at the file and line
     where DECL was declared so that error-messages make sense, and so
     that the debugger will show somewhat sensible file and line
     information.  */
  input_location = DECL_SOURCE_LOCATION (decl);

  /* Make sure temporary variables in the initialiser all have
     their DECL_CONTEXT() set to a value different from NULL_TREE.
     This can happen when global variables initializers are built.
     In that case, the DECL_CONTEXT() of the global variables _AND_ of all
     the temporary variables that might have been generated in the
     accompanying initializers is NULL_TREE, meaning the variables have been
     declared in the global namespace.
     What we want to do here is to fix that and make sure the DECL_CONTEXT()
     of the temporaries are set to the current function decl.  */
  cp_walk_tree_without_duplicates (&init,
				   fix_temporary_vars_context_r,
				   NULL);

  /* Because of:

       [class.access.spec]

       Access control for implicit calls to the constructors,
       the conversion functions, or the destructor called to
       create and destroy a static data member is performed as
       if these calls appeared in the scope of the member's
       class.

     we pretend we are in a static member function of the class of
     which the DECL is a member.  */
  if (member_p (decl))
    {
      DECL_CONTEXT (current_function_decl) = DECL_CONTEXT (decl);
      DECL_STATIC_FUNCTION_P (current_function_decl) = 1;
    }

  /* Assume we don't need a guard.  */
  guard = NULL_TREE;
  /* We need a guard if this is an object with external linkage that
     might be initialized in more than one place.  (For example, a
     static data member of a template, when the data member requires
     construction.)  */
  if (NEEDS_GUARD_P (decl))
    {
      tree guard_cond;

      guard = get_guard (decl);

      /* When using __cxa_atexit, we just check the GUARD as we would
	 for a local static.  */
      if (flag_use_cxa_atexit)
	{
	  /* When using __cxa_atexit, we never try to destroy
	     anything from a static destructor.  */
	  gcc_assert (initp);
	  guard_cond = get_guard_cond (guard, false);
	}
      /* If we don't have __cxa_atexit, then we will be running
	 destructors from .fini sections, or their equivalents.  So,
	 we need to know how many times we've tried to initialize this
	 object.  We do initializations only if the GUARD is zero,
	 i.e., if we are the first to initialize the variable.  We do
	 destructions only if the GUARD is one, i.e., if we are the
	 last to destroy the variable.  */
      else if (initp)
	guard_cond
	  = cp_build_binary_op (input_location,
				EQ_EXPR,
				cp_build_unary_op (PREINCREMENT_EXPR,
						   guard,
						   /*noconvert=*/true,
						   tf_warning_or_error),
				integer_one_node,
				tf_warning_or_error);
      else
	guard_cond
	  = cp_build_binary_op (input_location,
				EQ_EXPR,
				cp_build_unary_op (PREDECREMENT_EXPR,
						   guard,
						   /*noconvert=*/true,
						   tf_warning_or_error),
				integer_zero_node,
				tf_warning_or_error);

      guard_if_stmt = begin_if_stmt ();
      finish_if_stmt_cond (guard_cond, guard_if_stmt);
    }

  /* If we're using __cxa_atexit, we have not already set the GUARD,
     so we must do so now.  */
  if (guard && initp && flag_use_cxa_atexit)
    finish_expr_stmt (set_guard (guard));

  /* Perform the initialization or destruction.  */
  if (initp)
    {
      if (init)
	{
	  finish_expr_stmt (init);
	  if (sanitize_flags_p (SANITIZE_ADDRESS, decl))
	    {
	      /* Record for ASan that this variable is dynamically
		 initialized.  */
	      varpool_node *vnode = varpool_node::get (decl);
	      if (vnode)
		vnode->dynamically_initialized = 1;
	    }
	}

      /* If we're using __cxa_atexit, register a function that calls the
	 destructor for the object.  */
      if (flag_use_cxa_atexit)
	finish_expr_stmt (register_dtor_fn (decl));
    }
  else
    finish_expr_stmt (build_cleanup (decl));

  /* Finish the guard if-stmt, if necessary.  */
  if (guard)
    {
      finish_then_clause (guard_if_stmt);
      finish_if_stmt (guard_if_stmt);
    }

  /* Now that we're done with DECL we don't need to pretend to be a
     member of its class any longer.  */
  DECL_CONTEXT (current_function_decl) = NULL_TREE;
  DECL_STATIC_FUNCTION_P (current_function_decl) = 0;
}
/* Generate code to do the initialization or destruction of the decls in VARS,
   a TREE_LIST of VAR_DECL with static storage duration.
   Whether initialization or destruction is performed is specified by INITP.
   The statements are emitted into the SSDF currently being built; VARS is
   assumed to be sorted so that entries with equal effective priority are
   adjacent (the inner for loop below consumes each such run).  */

static void
do_static_initialization_or_destruction (tree vars, bool initp)
{
  tree node, init_if_stmt, cond;

  /* Build the outer if-stmt to check for initialization or destruction.  */
  init_if_stmt = begin_if_stmt ();
  cond = initp ? integer_one_node : integer_zero_node;
  cond = cp_build_binary_op (input_location,
			     EQ_EXPR,
			     initialize_p_decl,
			     cond,
			     tf_warning_or_error);
  finish_if_stmt_cond (cond, init_if_stmt);

  /* To make sure dynamic construction doesn't access globals from other
     compilation units where they might not be yet constructed, for
     -fsanitize=address insert __asan_before_dynamic_init call that
     prevents access to either all global variables that need construction
     in other compilation units, or at least those that haven't been
     initialized yet.  Variables that need dynamic construction in
     the current compilation unit are kept accessible.  */
  if (initp && (flag_sanitize & SANITIZE_ADDRESS))
    finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/false));

  node = vars;
  do {
    tree decl = TREE_VALUE (node);
    tree priority_if_stmt;
    int priority;
    priority_info pi;

    /* If we don't need a destructor, there's nothing to do.  Avoid
       creating a possibly empty if-stmt.  */
    if (!initp && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
      {
	node = TREE_CHAIN (node);
	continue;
      }

    /* Remember that we had an initialization or finalization at this
       priority.  */
    priority = DECL_EFFECTIVE_INIT_PRIORITY (decl);
    pi = get_priority_info (priority);
    if (initp)
      pi->initializations_p = 1;
    else
      pi->destructions_p = 1;

    /* Conditionalize this initialization on being in the right priority
       and being initializing/finalizing appropriately.  */
    priority_if_stmt = begin_if_stmt ();
    cond = cp_build_binary_op (input_location,
			       EQ_EXPR,
			       priority_decl,
			       build_int_cst (NULL_TREE, priority),
			       tf_warning_or_error);
    finish_if_stmt_cond (cond, priority_if_stmt);

    /* Process initializers with same priority.  Note this advances
       NODE past the whole run of same-priority entries.  */
    for (; node
	   && DECL_EFFECTIVE_INIT_PRIORITY (TREE_VALUE (node)) == priority;
	 node = TREE_CHAIN (node))
      /* Do one initialization or destruction.  */
      one_static_initialization_or_destruction (TREE_VALUE (node),
						TREE_PURPOSE (node), initp);

    /* Finish up the priority if-stmt body.  */
    finish_then_clause (priority_if_stmt);
    finish_if_stmt (priority_if_stmt);
  } while (node);

  /* Revert what __asan_before_dynamic_init did by calling
     __asan_after_dynamic_init.  */
  if (initp && (flag_sanitize & SANITIZE_ADDRESS))
    finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/true));

  /* Finish up the init/destruct if-stmt body.  */
  finish_then_clause (init_if_stmt);
  finish_if_stmt (init_if_stmt);
}
/* VARS is a list of variables with static storage duration which may
   need initialization and/or finalization.  Remove those variables
   that don't really need to be initialized or finalized, and return
   the resulting list.  The order in which the variables appear in
   VARS is in reverse order of the order in which they should actually
   be initialized.  The list we return is in the unreversed order;
   i.e., the first variable should be initialized first.  */

static tree
prune_vars_needing_no_initialization (tree *vars)
{
  tree *var = vars;
  tree result = NULL_TREE;

  while (*var)
    {
      tree t = *var;
      tree decl = TREE_VALUE (t);
      tree init = TREE_PURPOSE (t);

      /* Deal gracefully with error.  */
      if (error_operand_p (decl))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* The only things that can be initialized are variables.  */
      gcc_assert (VAR_P (decl));

      /* If this object is not defined, we don't need to do anything
	 here.  */
      if (DECL_EXTERNAL (decl))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* Also, if the initializer already contains errors, we can bail
	 out now.  */
      if (init && TREE_CODE (init) == TREE_LIST
	  && value_member (error_mark_node, init))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* This variable is going to need initialization and/or
	 finalization, so we add it to the list.  Splicing T out of
	 VARS and pushing it onto RESULT reverses the list, which
	 restores declaration order.  */
      *var = TREE_CHAIN (t);
      TREE_CHAIN (t) = result;
      result = t;
    }

  return result;
}
/* Make sure we have told the back end about all the variables in
   VARS.  */

static void
write_out_vars (tree vars)
{
  for (tree node = vars; node; node = TREE_CHAIN (node))
    {
      tree var = TREE_VALUE (node);

      /* Skip variables the back end already knows about.  */
      if (var_finalized_p (var))
	continue;

      import_export_decl (var);
      rest_of_decl_compilation (var, 1, 1);
    }
}
/* Generate a static constructor (if CONSTRUCTOR_P) or destructor
   (otherwise) that will initialize all global objects with static
   storage duration having the indicated PRIORITY.  LOCUS supplies the
   source location to attribute the generated code to.  */

static void
generate_ctor_or_dtor_function (bool constructor_p, int priority,
				location_t *locus)
{
  char function_key;
  tree fndecl;
  tree body;
  size_t i;

  input_location = *locus;
  /* ??? */
  /* Was: locus->line++; */

  /* We use `I' to indicate initialization and `D' to indicate
     destruction.  */
  function_key = constructor_p ? 'I' : 'D';

  /* We emit the function lazily, to avoid generating empty
     global constructors and destructors.  */
  body = NULL_TREE;

  /* For Objective-C++, we may need to initialize metadata found in this module.
     This must be done _before_ any other static initializations.  */
  if (c_dialect_objc () && (priority == DEFAULT_INIT_PRIORITY)
      && constructor_p && objc_static_init_needed_p ())
    {
      body = start_objects (function_key, priority);
      objc_generate_static_init_call (NULL_TREE);
    }

  /* Call the static storage duration function with appropriate
     arguments.  */
  FOR_EACH_VEC_SAFE_ELT (ssdf_decls, i, fndecl)
    {
      /* Calls to pure or const functions will expand to nothing.  */
      if (! (flags_from_decl_or_type (fndecl) & (ECF_CONST | ECF_PURE)))
	{
	  tree call;

	  /* Create the function body on first use.  */
	  if (! body)
	    body = start_objects (function_key, priority);
	  call = cp_build_function_call_nary (fndecl, tf_warning_or_error,
					      build_int_cst (NULL_TREE,
							     constructor_p),
					      build_int_cst (NULL_TREE,
							     priority),
					      NULL_TREE);
	  finish_expr_stmt (call);
	}
    }

  /* Close out the function.  */
  if (body)
    finish_objects (function_key, priority, body);
}
/* Generate constructor and destructor functions for the priority
   indicated by N.  N maps an initialization priority (key) to a
   priority_info (value); DATA points at the location_t to attribute
   to the generated functions.  Splay-tree traversal callback.  */
static int
generate_ctor_and_dtor_functions_for_priority (splay_tree_node n, void * data)
{
  location_t *locus = (location_t *) data;
  int priority = (int) n->key;
  priority_info pi = (priority_info) n->value;
  /* Generate the functions themselves, but only if they are really
     needed.  */
  if (pi->initializations_p)
    generate_ctor_or_dtor_function (/*constructor_p=*/true, priority, locus);
  if (pi->destructions_p)
    generate_ctor_or_dtor_function (/*constructor_p=*/false, priority, locus);
  /* Keep iterating.  */
  return 0;
}
/* Return C++ property of T, based on given operation OP.  Used as the
   cpp_check callback for -fdump-ada-spec; unknown operations yield 0.  */
static int
cpp_check (tree t, cpp_operation op)
{
  switch (op)
    {
    case IS_ABSTRACT:
      return DECL_PURE_VIRTUAL_P (t);

    case IS_ASSIGNMENT_OPERATOR:
      return DECL_ASSIGNMENT_OPERATOR_P (t);

    case IS_CONSTRUCTOR:
      return DECL_CONSTRUCTOR_P (t);

    case IS_COPY_CONSTRUCTOR:
      return DECL_COPY_CONSTRUCTOR_P (t);

    case IS_MOVE_CONSTRUCTOR:
      return DECL_MOVE_CONSTRUCTOR_P (t);

    case IS_DESTRUCTOR:
      return DECL_DESTRUCTOR_P (t);

    case IS_TEMPLATE:
      return TREE_CODE (t) == TEMPLATE_DECL;

    case IS_TRIVIAL:
      return trivial_type_p (t);

    case HAS_DEPENDENT_TEMPLATE_ARGS:
      {
	tree ti = CLASSTYPE_TEMPLATE_INFO (t);
	if (!ti)
	  return 0;
	/* The dependency check must run as if within a template.  */
	++processing_template_decl;
	const bool dep = any_dependent_template_arguments_p (TI_ARGS (ti));
	--processing_template_decl;
	return dep;
      }

    default:
      return 0;
    }
}
/* Collect source file references recursively, starting from NAMESPC.  */
static void
collect_source_refs (tree namespc)
{
  /* Walk every name bound in this namespace.  */
  for (tree t = NAMESPACE_LEVEL (namespc)->names; t; t = TREE_CHAIN (t))
    {
      /* Built-in declarations contribute no source reference.  */
      if (DECL_IS_BUILTIN (t))
	continue;

      if (TREE_CODE (t) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (t))
	collect_source_refs (t);
      else
	collect_source_ref (DECL_SOURCE_FILE (t));
    }
}
/* Collect decls relevant to SOURCE_FILE from all namespaces recursively,
   starting from NAMESPC.  */
static void
collect_ada_namespace (tree namespc, const char *source_file)
{
  tree names = NAMESPACE_LEVEL (namespc)->names;

  /* First, collect the decls bound directly in this namespace.  This
     will skip NAMESPACE_DECLs (both aliases and regular, it cannot
     tell).  */
  collect_ada_nodes (names, source_file);

  /* Then recurse into each non-alias child namespace.  */
  for (tree d = names; d; d = TREE_CHAIN (d))
    {
      if (TREE_CODE (d) != NAMESPACE_DECL || DECL_NAMESPACE_ALIAS (d))
	continue;
      collect_ada_namespace (d, source_file);
    }
}
/* Returns true iff there is a definition available for variable or
   function DECL.  */
bool
decl_defined_p (tree decl)
{
  if (TREE_CODE (decl) != FUNCTION_DECL)
    {
      /* Only variables and functions can be asked about.  */
      gcc_assert (VAR_P (decl));
      return !DECL_EXTERNAL (decl);
    }

  if (DECL_INITIAL (decl) != NULL_TREE)
    return true;

  /* A pending instantiation of a friend temploid is defined.  */
  return (DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
	  && DECL_INITIAL (DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (decl))));
}
/* Nonzero for a VAR_DECL whose value can be used in a constant expression.
   [expr.const]
   An integral constant-expression can only involve ... const
   variables of integral or enumeration types initialized with
   constant expressions ...
   C++0x also allows constexpr variables and temporaries initialized
   with constant expressions.  We handle the former here, but the latter
   are just folded away in cxx_eval_constant_expression.
   The standard does not require that the expression be non-volatile.
   G++ implements the proposed correction in DR 457.  */
bool
decl_constant_var_p (tree decl)
{
  /* First filter on type/qualification grounds.  */
  if (!decl_maybe_constant_var_p (decl))
    return false;
  /* We don't know if a template static data member is initialized with
     a constant expression until we instantiate its initializer.  Even
     in the case of a constexpr variable, we can't treat it as a
     constant until its initializer is complete in case it's used in
     its own initializer.  */
  maybe_instantiate_decl (decl);
  return DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl);
}
/* Returns true if DECL could be a symbolic constant variable, depending on
   its initializer.  */
bool
decl_maybe_constant_var_p (tree decl)
{
  if (!VAR_P (decl))
    return false;

  /* A non-volatile constexpr variable always qualifies.  */
  if (DECL_DECLARED_CONSTEXPR_P (decl) && !TREE_THIS_VOLATILE (decl))
    return true;

  /* A proxy isn't constant.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    return false;

  /* Beyond constexpr, only references and const non-volatile
     integral/enumeration variables can be constant.  */
  tree type = TREE_TYPE (decl);
  bool eligible_type
    = (TYPE_REF_P (type)
       || (CP_TYPE_CONST_NON_VOLATILE_P (type)
	   && INTEGRAL_OR_ENUMERATION_TYPE_P (type)));
  if (!eligible_type)
    return false;

  /* If we already know the initializer and it isn't a constant
     expression, the variable cannot be constant; otherwise it still
     might be.  */
  return !(DECL_INITIAL (decl)
	   && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl));
}
/* Complain that DECL uses a type with no linkage.  In C++98 mode this is
   called from grokfndecl and grokvardecl; in all modes it is called from
   cp_write_global_declarations.  Which diagnostic (permerror, warning,
   or none) is chosen depends on dialect, on whether the culprit type is
   unnamed or merely local, and on the kind of DECL.  */
void
no_linkage_error (tree decl)
{
  if (cxx_dialect >= cxx11
      && (decl_defined_p (decl)
	  /* Treat templates which limit_bad_template_recursion decided
	     not to instantiate as if they were defined.  */
	  || (errorcount + sorrycount > 0
	      && DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_INFO (decl)
	      && TREE_NO_WARNING (decl))))
    /* In C++11 it's ok if the decl is defined.  */
    return;
  /* Find the offending no-linkage type, if it still is one.  */
  tree t = no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false);
  if (t == NULL_TREE)
    /* The type that got us on no_linkage_decls must have gotten a name for
       linkage purposes.  */;
  else if (CLASS_TYPE_P (t) && TYPE_BEING_DEFINED (t))
    // FIXME: This is now invalid, as a DR to c++98
    /* The type might end up having a typedef name for linkage purposes.
       Re-queue the decl to check again at end of compilation.  */
    vec_safe_push (no_linkage_decls, decl);
  else if (TYPE_UNNAMED_P (t))
    {
      bool d = false;
      auto_diagnostic_group grp;
      if (cxx_dialect >= cxx11)
	d = permerror (DECL_SOURCE_LOCATION (decl), "%q#D, declared using "
		       "unnamed type, is used but never defined", decl);
      else if (DECL_EXTERN_C_P (decl))
	/* Allow this; it's pretty common in C.  */;
      else if (VAR_P (decl))
	/* DRs 132, 319 and 389 seem to indicate types with
	   no linkage can only be used to declare extern "C"
	   entities.  Since it's not always an error in the
	   ISO C++ 90 Standard, we only issue a warning.  */
	d = warning_at (DECL_SOURCE_LOCATION (decl), 0, "unnamed type "
			"with no linkage used to declare variable %q#D with "
			"linkage", decl);
      else
	d = permerror (DECL_SOURCE_LOCATION (decl), "unnamed type with no "
		       "linkage used to declare function %q#D with linkage",
		       decl);
      /* If we diagnosed and the type has a typedef name, explain why
	 that name did not give it linkage.  */
      if (d && is_typedef_decl (TYPE_NAME (t)))
	inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)), "%q#D does not refer "
		"to the unqualified type, so it is not used for linkage",
		TYPE_NAME (t));
    }
  else if (cxx_dialect >= cxx11)
    {
      if (VAR_P (decl) || !DECL_PURE_VIRTUAL_P (decl))
	permerror (DECL_SOURCE_LOCATION (decl),
		   "%q#D, declared using local type "
		   "%qT, is used but never defined", decl, t);
    }
  else if (VAR_P (decl))
    warning_at (DECL_SOURCE_LOCATION (decl), 0, "type %qT with no linkage "
		"used to declare variable %q#D with linkage", t, decl);
  else
    permerror (DECL_SOURCE_LOCATION (decl), "type %qT with no linkage used "
	       "to declare function %q#D with linkage", t, decl);
}
/* Collect declarations from all namespaces relevant to SOURCE_FILE,
   starting at the global namespace.  Used as the collection callback
   passed to dump_ada_specs.  */
static void
collect_all_refs (const char *source_file)
{
  collect_ada_namespace (global_namespace, source_file);
}
/* Clear DECL_EXTERNAL for NODE.  Callback for
   call_for_symbol_thunks_and_aliases; returning false means
   "continue walking".  */
static bool
clear_decl_external (struct cgraph_node *node, void * /*data*/)
{
  DECL_EXTERNAL (node->decl) = 0;
  return false;
}
/* Build up the function to run dynamic initializers for thread_local
   variables in this translation unit and alias the init functions for the
   individual variables to it.  The generated function tests and sets a
   thread-local __tls_guard boolean so the initializers run at most once
   per thread.  */
static void
handle_tls_init (void)
{
  tree vars = prune_vars_needing_no_initialization (&tls_aggregates);
  if (vars == NULL_TREE)
    return;
  location_t loc = DECL_SOURCE_LOCATION (TREE_VALUE (vars));
  write_out_vars (vars);
  /* Create the per-thread guard variable controlling one-time init.  */
  tree guard = build_decl (loc, VAR_DECL, get_identifier ("__tls_guard"),
			   boolean_type_node);
  TREE_PUBLIC (guard) = false;
  TREE_STATIC (guard) = true;
  DECL_ARTIFICIAL (guard) = true;
  DECL_IGNORED_P (guard) = true;
  TREE_USED (guard) = true;
  CP_DECL_THREAD_LOCAL_P (guard) = true;
  set_decl_tls_model (guard, decl_default_tls_model (guard));
  pushdecl_top_level_and_finish (guard, NULL_TREE);
  /* Open the body of the TLS init function:
       if (!__tls_guard) { __tls_guard = true; <initializers>; }  */
  tree fn = get_local_tls_init_fn (loc);
  start_preparsed_function (fn, NULL_TREE, SF_PRE_PARSED);
  tree body = begin_function_body ();
  tree if_stmt = begin_if_stmt ();
  tree cond = cp_build_unary_op (TRUTH_NOT_EXPR, guard, false,
				 tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  /* Set the guard before running initializers, matching the ABI's
     once-per-thread protocol.  */
  finish_expr_stmt (cp_build_modify_expr (loc, guard, NOP_EXPR,
					  boolean_true_node,
					  tf_warning_or_error));
  for (; vars; vars = TREE_CHAIN (vars))
    {
      tree var = TREE_VALUE (vars);
      tree init = TREE_PURPOSE (vars);
      one_static_initialization_or_destruction (var, init, true);
      /* Output init aliases even with -fno-extern-tls-init.  */
      if (TARGET_SUPPORTS_ALIASES && TREE_PUBLIC (var))
	{
	  tree single_init_fn = get_tls_init_fn (var);
	  if (single_init_fn == NULL_TREE)
	    continue;
	  /* The per-variable init function becomes a same-body alias of
	     the combined function.  */
	  cgraph_node *alias
	    = cgraph_node::get_create (fn)->create_same_body_alias
	    (single_init_fn, fn);
	  gcc_assert (alias != NULL);
	}
    }
  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
  finish_function_body (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}
/* We're at the end of compilation, so generate any mangling aliases that
   we've been saving up, if DECL is going to be output and ID2 isn't
   already taken by another declaration.  */
static void
generate_mangling_alias (tree decl, tree id2)
{
  struct cgraph_node *n = NULL;
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      n = cgraph_node::get (decl);
      if (!n)
	/* Don't create an alias to an unreferenced function.  */
	return;
    }
  /* Reserve the slot for ID2 in the mangled-name table.  */
  tree *slot
    = mangled_decls->find_slot_with_hash (id2, IDENTIFIER_HASH_VALUE (id2),
					  INSERT);
  /* If there's a declaration already using this mangled name,
     don't create a compatibility alias that conflicts.  */
  if (*slot)
    return;
  tree alias = make_alias_for (decl, id2);
  *slot = alias;
  /* The alias mirrors DECL's visibility and linkage properties.  */
  DECL_IGNORED_P (alias) = 1;
  TREE_PUBLIC (alias) = TREE_PUBLIC (decl);
  DECL_VISIBILITY (alias) = DECL_VISIBILITY (decl);
  if (vague_linkage_p (decl))
    DECL_WEAK (alias) = 1;
  /* Register the alias with the appropriate symbol table: cgraph for
     functions, varpool for variables.  */
  if (n)
    n->create_same_body_alias (alias, decl);
  else
    varpool_node::create_extra_name_alias (alias, decl);
}
/* Note that we might want to emit an alias with the symbol ID2 for DECL at
   the end of translation, for compatibility across bugs in the mangling
   implementation.  */
void
note_mangling_alias (tree decl, tree id2)
{
  if (!TARGET_SUPPORTS_ALIASES)
    return;

  if (defer_mangling_aliases)
    {
      /* Queue the (decl, id2) pair for generate_mangling_aliases.  */
      vec_safe_push (mangling_aliases, decl);
      vec_safe_push (mangling_aliases, id2);
    }
  else
    generate_mangling_alias (decl, id2);
}
/* Emit all mangling aliases that were deferred up to this point.
   Entries were pushed in (decl, id2) order, so they pop in reverse.  */
void
generate_mangling_aliases ()
{
  while (!vec_safe_is_empty (mangling_aliases))
    {
      tree id2 = mangling_aliases->pop();
      tree decl = mangling_aliases->pop();
      generate_mangling_alias (decl, id2);
    }
  /* From now on, note_mangling_alias emits immediately.  */
  defer_mangling_aliases = false;
}
/* Record a mangling of DECL, whose DECL_ASSEMBLER_NAME has just been
   set.  NEED_WARNING is true if we must warn about collisions.  We do
   this to spot changes in mangling that may require compatibility
   aliases.  */
void
record_mangling (tree decl, bool need_warning)
{
  /* Lazily create the mangled-name table on first use.  */
  if (!mangled_decls)
    mangled_decls = hash_table<mangled_decl_hash>::create_ggc (499);
  gcc_checking_assert (DECL_ASSEMBLER_NAME_SET_P (decl));
  tree id = DECL_ASSEMBLER_NAME_RAW (decl);
  tree *slot
    = mangled_decls->find_slot_with_hash (id, IDENTIFIER_HASH_VALUE (id),
					  INSERT);
  /* If this is already an alias, remove the alias, because the real
     decl takes precedence.  */
  if (*slot && DECL_ARTIFICIAL (*slot) && DECL_IGNORED_P (*slot))
    if (symtab_node *n = symtab_node::get (*slot))
      if (n->cpp_implicit_alias)
	{
	  n->remove ();
	  *slot = NULL_TREE;
	}
  if (!*slot)
    *slot = decl;
  else if (need_warning)
    {
      /* A genuine collision: two decls claim the same mangled name.  */
      error_at (DECL_SOURCE_LOCATION (decl),
		"mangling of %q#D as %qE conflicts with a previous mangle",
		decl, id);
      inform (DECL_SOURCE_LOCATION (*slot),
	      "previous mangling %q#D", *slot);
      inform (DECL_SOURCE_LOCATION (decl),
	      "a later %<-fabi-version=%> (or =0)"
	      " avoids this error with a change in mangling");
      /* The newest decl wins the slot after diagnosing.  */
      *slot = decl;
    }
}
/* The mangled name of DECL is being forcibly changed to NAME.  Remove
   any existing knowledge of DECL's mangled name meaning DECL.  */
void
overwrite_mangling (tree decl, tree name)
{
  /* Only clean up the table entry if DECL had an old mangled name,
     is a variable or function, and the table exists at all.  */
  if (tree id = DECL_ASSEMBLER_NAME_RAW (decl))
    if ((TREE_CODE (decl) == VAR_DECL
	 || TREE_CODE (decl) == FUNCTION_DECL)
	&& mangled_decls)
      if (tree *slot
	  = mangled_decls->find_slot_with_hash (id, IDENTIFIER_HASH_VALUE (id),
						NO_INSERT))
	if (*slot == decl)
	  {
	    mangled_decls->clear_slot (slot);
	    /* If this is an alias, remove it from the symbol table.  */
	    if (DECL_ARTIFICIAL (decl) && DECL_IGNORED_P (decl))
	      if (symtab_node *n = symtab_node::get (decl))
		if (n->cpp_implicit_alias)
		  n->remove ();
	  }
  DECL_ASSEMBLER_NAME_RAW (decl) = name;
}
/* The entire file is now complete.  If requested (-fdump-lang-raw),
   dump the whole translation unit rooted at the global namespace
   to a file.  */
static void
dump_tu (void)
{
  dump_flags_t flags;
  if (FILE *stream = dump_begin (raw_dump_id, &flags))
    {
      dump_node (global_namespace, flags & ~TDF_SLIM, stream);
      dump_end (raw_dump_id, stream);
    }
}
/* Source location captured when parsing finished; used to attribute
   the compiler-generated init/fini functions emitted afterwards.  */
static location_t locus_at_end_of_parsing;
/* Check the deallocation functions for CODE (DELETE_EXPR or
   VEC_DELETE_EXPR) to see if we want to warn that only one of the
   sized/unsized pair was defined.  */
static void
maybe_warn_sized_delete (enum tree_code code)
{
  tree sized = NULL_TREE;
  tree unsized = NULL_TREE;
  for (ovl_iterator iter (get_global_binding (ovl_op_identifier (false, code)));
       iter; ++iter)
    {
      tree fn = *iter;
      /* We're only interested in usual deallocation functions.  */
      if (!usual_deallocation_fn_p (fn))
	continue;
      /* A (void*)-only parameter list marks the unsized form.  */
      if (FUNCTION_ARG_CHAIN (fn) == void_list_node)
	unsized = fn;
      else
	sized = fn;
    }
  /* NOTE(review): this dereferences UNSIZED and SIZED unconditionally —
     it assumes both usual forms are always present in the global
     binding (presumably via implicit declarations); confirm before
     relying on this with user-only bindings.  */
  if (DECL_INITIAL (unsized) && !DECL_INITIAL (sized))
    warning_at (DECL_SOURCE_LOCATION (unsized), OPT_Wsized_deallocation,
		"the program should also define %qD", sized);
  else if (!DECL_INITIAL (unsized) && DECL_INITIAL (sized))
    warning_at (DECL_SOURCE_LOCATION (sized), OPT_Wsized_deallocation,
		"the program should also define %qD", unsized);
}
/* Check the global deallocation functions to see if we want to warn about
   defining unsized without sized (or vice versa).  */
static void
maybe_warn_sized_delete ()
{
  /* Both the feature and the warning must be enabled.  */
  if (flag_sized_deallocation && warn_sized_deallocation)
    {
      maybe_warn_sized_delete (DELETE_EXPR);
      maybe_warn_sized_delete (VEC_DELETE_EXPR);
    }
}
/* Earlier we left PTRMEM_CST in variable initializers alone so that we could
   look them up when evaluating non-type template parameters.  Now we need to
   lower them to something the back end can understand.  */
static void
lower_var_init ()
{
  varpool_node *node;
  FOR_EACH_VARIABLE (node)
    {
      tree decl = node->decl;
      tree init = DECL_INITIAL (decl);
      /* Only variables that actually have an initializer need lowering.  */
      if (init)
	DECL_INITIAL (decl) = cplus_expand_constant (init);
    }
}
/* This routine is called at the end of compilation.
   Its job is to create all the code needed to initialize and
   destroy the global aggregates.  We do the destruction
   first, since that way we only need to reverse the decls once.
   It runs a fixpoint loop: instantiating templates, emitting vtables,
   tinfo decls, static initializers and deferred functions until no
   pass produces new work.  */
void
c_parse_final_cleanups (void)
{
  tree vars;
  bool reconsider;
  size_t i;
  unsigned ssdf_count = 0;
  int retries = 0;
  tree decl;
  locus_at_end_of_parsing = input_location;
  at_eof = 1;
  /* Bad parse errors.  Just forget about it.  */
  if (! global_bindings_p () || current_class_type
      || !vec_safe_is_empty (decl_namespace_list))
    return;
  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      /* Mangle all symbols at PCH creation time.  */
      symtab_node *node;
      FOR_EACH_SYMBOL (node)
	if (! is_a <varpool_node *> (node)
	    || ! DECL_HARD_REGISTER (node->decl))
	  DECL_ASSEMBLER_NAME (node->decl);
      c_common_write_pch ();
      dump_tu ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }
  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);
  symtab->process_same_body_aliases ();
  /* Handle -fdump-ada-spec[-slim] */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      collect_source_ref (main_input_filename);
      if (!flag_dump_ada_spec_slim)
	collect_source_refs (global_namespace);
      dump_ada_specs (collect_all_refs, cpp_check);
    }
  /* FIXME - huh?  was  input_line -= 1;*/
  /* We now have to write out all the stuff we put off writing out.
     These include:
       o Template specializations that we have not yet instantiated,
	 but which are needed.
       o Initialization and destruction for non-local objects with
	 static storage duration.  (Local objects with static storage
	 duration are initialized when their scope is first entered,
	 and are cleaned up via atexit.)
       o Virtual function tables.
     All of these may cause others to be needed.  For example,
     instantiating one function may cause another to be needed, and
     generating the initializer for an object may cause templates to be
     instantiated, etc., etc.  */
  emit_support_tinfos ();
  /* Track vtables we want to emit that refer to consteval functions.  */
  auto_vec<tree> consteval_vtables;
  /* The fixpoint loop: keep going while any sub-pass reports that it
     did something that may have created new work.  */
  do
    {
      tree t;
      tree decl;
      reconsider = false;
      /* If there are templates that we've put off instantiating, do
	 them now.  */
      instantiate_pending_templates (retries);
      ggc_collect ();
      /* Write out virtual tables as required.  Writing out the
	 virtual table for a template class may cause the
	 instantiation of members of that class.  If we write out
	 vtables then we remove the class from our list so we don't
	 have to look at it again.  */
      for (i = keyed_classes->length ();
	   keyed_classes->iterate (--i, &t);)
	if (maybe_emit_vtables (t, consteval_vtables))
	  {
	    reconsider = true;
	    keyed_classes->unordered_remove (i);
	  }
      /* The input_location may have been changed during marking of
	 vtable entries.  */
      input_location = locus_at_end_of_parsing;
      /* Write out needed type info variables.  We have to be careful
	 looping through unemitted decls, because emit_tinfo_decl may
	 cause other variables to be needed.  New elements will be
	 appended, and we remove from the vector those that actually
	 get emitted.  */
      for (i = unemitted_tinfo_decls->length ();
	   unemitted_tinfo_decls->iterate (--i, &t);)
	if (emit_tinfo_decl (t))
	  {
	    reconsider = true;
	    unemitted_tinfo_decls->unordered_remove (i);
	  }
      /* The list of objects with static storage duration is built up
	 in reverse order.  We clear STATIC_AGGREGATES so that any new
	 aggregates added during the initialization of these will be
	 initialized in the correct order when we next come around the
	 loop.  */
      vars = prune_vars_needing_no_initialization (&static_aggregates);
      if (vars)
	{
	  /* We need to start a new initialization function each time
	     through the loop.  That's because we need to know which
	     vtables have been referenced, and TREE_SYMBOL_REFERENCED
	     isn't computed until a function is finished, and written
	     out.  That's a deficiency in the back end.  When this is
	     fixed, these initialization functions could all become
	     inline, with resulting performance improvements.  */
	  tree ssdf_body;
	  /* Make sure the back end knows about all the variables.  */
	  write_out_vars (vars);
	  /* Set the line and file, so that it is obviously not from
	     the source file.  */
	  input_location = locus_at_end_of_parsing;
	  ssdf_body = start_static_storage_duration_function (ssdf_count);
	  /* First generate code to do all the initializations.  */
	  if (vars)
	    do_static_initialization_or_destruction (vars, /*initp=*/true);
	  /* Then, generate code to do all the destructions.  Do these
	     in reverse order so that the most recently constructed
	     variable is the first destroyed.  If we're using
	     __cxa_atexit, then we don't need to do this; functions
	     were registered at initialization time to destroy the
	     local statics.  */
	  if (!flag_use_cxa_atexit && vars)
	    {
	      vars = nreverse (vars);
	      do_static_initialization_or_destruction (vars, /*initp=*/false);
	    }
	  else
	    vars = NULL_TREE;
	  /* Finish up the static storage duration function for this
	     round.  */
	  input_location = locus_at_end_of_parsing;
	  finish_static_storage_duration_function (ssdf_body);
	  /* All those initializations and finalizations might cause
	     us to need more inline functions, more template
	     instantiations, etc.  */
	  reconsider = true;
	  ssdf_count++;
	  /* ??? was:  locus_at_end_of_parsing.line++; */
	}
      /* Now do the same for thread_local variables.  */
      handle_tls_init ();
      /* Go through the set of inline functions whose bodies have not
	 been emitted yet.  If out-of-line copies of these functions
	 are required, emit them.  */
      FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
	{
	  /* Does it need synthesizing?  */
	  if (DECL_DEFAULTED_FN (decl) && ! DECL_INITIAL (decl)
	      && (! DECL_REALLY_EXTERN (decl) || possibly_inlined_p (decl)))
	    {
	      /* Even though we're already at the top-level, we push
		 there again.  That way, when we pop back a few lines
		 hence, all of our state is restored.  Otherwise,
		 finish_function doesn't clean things up, and we end
		 up with CURRENT_FUNCTION_DECL set.  */
	      push_to_top_level ();
	      /* The decl's location will mark where it was first
		 needed.  Save that so synthesize method can indicate
		 where it was needed from, in case of error  */
	      input_location = DECL_SOURCE_LOCATION (decl);
	      synthesize_method (decl);
	      pop_from_top_level ();
	      reconsider = true;
	    }
	  if (!DECL_INITIAL (decl) && decl_tls_wrapper_p (decl))
	    generate_tls_wrapper (decl);
	  if (!DECL_SAVED_TREE (decl))
	    continue;
	  cgraph_node *node = cgraph_node::get_create (decl);
	  /* We lie to the back end, pretending that some functions
	     are not defined when they really are.  This keeps these
	     functions from being put out unnecessarily.  But, we must
	     stop lying when the functions are referenced, or if they
	     are not comdat since they need to be put out now.  If
	     DECL_INTERFACE_KNOWN, then we have already set
	     DECL_EXTERNAL appropriately, so there's no need to check
	     again, and we do not want to clear DECL_EXTERNAL if a
	     previous call to import_export_decl set it.
	     This is done in a separate for cycle, because if some
	     deferred function is contained in another deferred
	     function later in deferred_fns varray,
	     rest_of_compilation would skip this function and we
	     really cannot expand the same function twice.  */
	  import_export_decl (decl);
	  if (DECL_NOT_REALLY_EXTERN (decl)
	      && DECL_INITIAL (decl)
	      && decl_needed_p (decl))
	    {
	      if (node->cpp_implicit_alias)
		node = node->get_alias_target ();
	      node->call_for_symbol_thunks_and_aliases (clear_decl_external,
							NULL, true);
	      /* If we mark !DECL_EXTERNAL one of the symbols in some comdat
		 group, we need to mark all symbols in the same comdat group
		 that way.  */
	      if (node->same_comdat_group)
		for (cgraph_node *next
		       = dyn_cast<cgraph_node *> (node->same_comdat_group);
		     next != node;
		     next = dyn_cast<cgraph_node *> (next->same_comdat_group))
		  next->call_for_symbol_thunks_and_aliases (clear_decl_external,
							    NULL, true);
	    }
	  /* If we're going to need to write this function out, and
	     there's already a body for it, create RTL for it now.
	     (There might be no body if this is a method we haven't
	     gotten around to synthesizing yet.)  */
	  if (!DECL_EXTERNAL (decl)
	      && decl_needed_p (decl)
	      && !TREE_ASM_WRITTEN (decl)
	      && !node->definition)
	    {
	      /* We will output the function; no longer consider it in this
		 loop.  */
	      DECL_DEFER_OUTPUT (decl) = 0;
	      /* Generate RTL for this function now that we know we
		 need it.  */
	      expand_or_defer_fn (decl);
	      reconsider = true;
	    }
	}
      if (wrapup_namespace_globals ())
	reconsider = true;
      /* Static data members are just like namespace-scope globals.  */
      FOR_EACH_VEC_SAFE_ELT (pending_statics, i, decl)
	{
	  if (var_finalized_p (decl) || DECL_REALLY_EXTERN (decl)
	      /* Don't write it out if we haven't seen a definition.  */
	      || DECL_IN_AGGR_P (decl))
	    continue;
	  import_export_decl (decl);
	  /* If this static data member is needed, provide it to the
	     back end.  */
	  if (DECL_NOT_REALLY_EXTERN (decl) && decl_needed_p (decl))
	    DECL_EXTERNAL (decl) = 0;
	}
      if (vec_safe_length (pending_statics) != 0
	  && wrapup_global_declarations (pending_statics->address (),
					 pending_statics->length ()))
	reconsider = true;
      retries++;
    }
  while (reconsider);
  lower_var_init ();
  generate_mangling_aliases ();
  /* All used inline functions must have a definition at this point.  */
  FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
    {
      if (/* Check online inline functions that were actually used.  */
	  DECL_ODR_USED (decl) && DECL_DECLARED_INLINE_P (decl)
	  /* If the definition actually was available here, then the
	     fact that the function was not defined merely represents
	     that for some reason (use of a template repository,
	     #pragma interface, etc.) we decided not to emit the
	     definition here.  */
	  && !DECL_INITIAL (decl)
	  /* Don't complain if the template was defined.  */
	  && !(DECL_TEMPLATE_INSTANTIATION (decl)
	       && DECL_INITIAL (DECL_TEMPLATE_RESULT
				(template_for_substitution (decl))))
	  && warning_at (DECL_SOURCE_LOCATION (decl), 0,
			 "inline function %qD used but never defined", decl))
	/* Avoid a duplicate warning from check_global_declaration.  */
	TREE_NO_WARNING (decl) = 1;
    }
  /* So must decls that use a type with no linkage.  */
  FOR_EACH_VEC_SAFE_ELT (no_linkage_decls, i, decl)
    no_linkage_error (decl);
  maybe_warn_sized_delete ();
  /* Then, do the Objective-C stuff.  This is where all the
     Objective-C module stuff gets generated (symtab,
     class/protocol/selector lists etc).  This must be done after C++
     templates, destructors etc. so that selectors used in C++
     templates are properly allocated.  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();
  /* We give C linkage to static constructors and destructors.  */
  push_lang_context (lang_name_c);
  /* Generate initialization and destruction functions for all
     priorities for which they are required.  */
  if (priority_info_map)
    splay_tree_foreach (priority_info_map,
			generate_ctor_and_dtor_functions_for_priority,
			/*data=*/&locus_at_end_of_parsing);
  else if (c_dialect_objc () && objc_static_init_needed_p ())
    /* If this is obj-c++ and we need a static init, call
       generate_ctor_or_dtor_function.  */
    generate_ctor_or_dtor_function (/*constructor_p=*/true,
				    DEFAULT_INIT_PRIORITY,
				    &locus_at_end_of_parsing);
  /* We're done with the splay-tree now.  */
  if (priority_info_map)
    splay_tree_delete (priority_info_map);
  /* Generate any missing aliases.  */
  maybe_apply_pending_pragma_weaks ();
  /* We're done with static constructors, so we can go back to "C++"
     linkage now.  */
  pop_lang_context ();
  if (flag_vtable_verify)
    {
      vtv_recover_class_info ();
      vtv_compute_class_hierarchy_transitive_closure ();
      vtv_build_vtable_verify_fndecl ();
    }
  perform_deferred_noexcept_checks ();
  fini_constexpr ();
  clear_consteval_vfns (consteval_vtables);
  /* The entire file is now complete.  If requested, dump everything
     to a file.  */
  dump_tu ();
  if (flag_detailed_statistics)
    {
      dump_tree_statistics ();
      dump_time_statistics ();
    }
  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);
  /* Indicate that we're done with front end processing.  */
  at_eof = 2;
}
/* Perform any post compilation-proper cleanups for the C++ front-end.
   This should really go away.  No front-end should need to do
   anything past the compilation process.  */
void
cxx_post_compilation_parsing_cleanups (void)
{
  timevar_start (TV_PHASE_LATE_PARSING_CLEANUPS);
  if (flag_vtable_verify)
    {
      /* Generate the special constructor initialization function that
	 calls __VLTRegisterPairs, and give it a very high
	 initialization priority.  This must be done after
	 finalize_compilation_unit so that we have accurate
	 information about which vtable will actually be emitted.  */
      vtv_generate_init_routine ();
    }
  input_location = locus_at_end_of_parsing;
  /* With checking enabled, verify the conversion obstack was left
     balanced.  */
  if (flag_checking)
    validate_conversion_obstack ();
  timevar_stop (TV_PHASE_LATE_PARSING_CLEANUPS);
}
/* FN is an OFFSET_REF, DOTSTAR_EXPR or MEMBER_REF indicating the
   function to call in parse-tree form; it has not yet been
   semantically analyzed.  ARGS are the arguments to the function.
   They have already been semantically analyzed.  This may change
   ARGS.  Returns the resulting call expression (or error_mark_node).  */
tree
build_offset_ref_call_from_tree (tree fn, vec<tree, va_gc> **args,
				 tsubst_flags_t complain)
{
  tree orig_fn;
  vec<tree, va_gc> *orig_args = NULL;
  tree expr;
  tree object;
  orig_fn = fn;
  object = TREE_OPERAND (fn, 0);
  if (processing_template_decl)
    {
      gcc_assert (TREE_CODE (fn) == DOTSTAR_EXPR
		  || TREE_CODE (fn) == MEMBER_REF);
      /* If anything is type-dependent, defer the whole call to
	 instantiation time.  */
      if (type_dependent_expression_p (fn)
	  || any_type_dependent_arguments_p (*args))
	return build_min_nt_call_vec (fn, *args);
      /* Keep the original arguments for rebuilding the template form
	 at the end.  */
      orig_args = make_tree_vector_copy (*args);
      /* Transform the arguments and add the implicit "this"
	 parameter.  That must be done before the FN is transformed
	 because we depend on the form of FN.  */
      make_args_non_dependent (*args);
      object = build_non_dependent_expr (object);
      if (TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE)
	{
	  if (TREE_CODE (fn) == DOTSTAR_EXPR)
	    object = cp_build_addr_expr (object, complain);
	  vec_safe_insert (*args, 0, object);
	}
      /* Now that the arguments are done, transform FN.  */
      fn = build_non_dependent_expr (fn);
    }
  /* A qualified name corresponding to a bound pointer-to-member is
     represented as an OFFSET_REF:
	struct B { void g(); };
	void (B::*p)();
	void B::g() { (this->*p)(); }  */
  if (TREE_CODE (fn) == OFFSET_REF)
    {
      tree object_addr = cp_build_addr_expr (object, complain);
      fn = TREE_OPERAND (fn, 1);
      fn = get_member_function_from_ptrfunc (&object_addr, fn,
					     complain);
      vec_safe_insert (*args, 0, object_addr);
    }
  /* A class-typed FN means a call through operator().  */
  if (CLASS_TYPE_P (TREE_TYPE (fn)))
    expr = build_op_call (fn, args, complain);
  else
    expr = cp_build_function_call_vec (fn, args, complain);
  /* Inside a template, wrap the analyzed call back up with the
     original operands for later substitution.  */
  if (processing_template_decl && expr != error_mark_node)
    expr = build_min_non_dep_call_vec (expr, orig_fn, orig_args);
  if (orig_args != NULL)
    release_tree_vector (orig_args);
  return expr;
}
/* Check that the parameters of X have no gaps in their default
   arguments: once one parameter has a default, every later one must
   too (pack expansions excepted).  Diagnose violations and mark the
   offending defaults with error_mark_node.  */
void
check_default_args (tree x)
{
  tree arg = TYPE_ARG_TYPES (TREE_TYPE (x));
  bool saw_def = false;
  bool noted_first_def = false;
  int idx_of_first_default_arg = 0;
  location_t loc_of_first_default_arg = UNKNOWN_LOCATION;
  /* For a METHOD_TYPE, start at -1 so the implicit `this' does not
     count as parameter 0 in diagnostics.  */
  int i = 0 - (TREE_CODE (TREE_TYPE (x)) == METHOD_TYPE);
  tree fndecl = STRIP_TEMPLATE (x);
  auto_diagnostic_group d;
  for (; arg && arg != void_list_node; arg = TREE_CHAIN (arg), ++i)
    {
      /* TREE_PURPOSE holds the default argument, if any.  */
      if (TREE_PURPOSE (arg))
	{
	  if (!saw_def)
	    {
	      saw_def = true;
	      idx_of_first_default_arg = i;
	      location_t loc = get_fndecl_argument_location (fndecl, i);
	      /* Only note the location if it is distinct from the
		 decl's own location.  */
	      if (loc != DECL_SOURCE_LOCATION (x))
		loc_of_first_default_arg = loc;
	    }
	}
      else if (saw_def && !PACK_EXPANSION_P (TREE_VALUE (arg)))
	{
	  error_at (get_fndecl_argument_location (fndecl, i),
		    "default argument missing for parameter %P of %q#D", i, x);
	  /* Point at the first defaulted parameter once.  */
	  if (loc_of_first_default_arg != UNKNOWN_LOCATION
	      && !noted_first_def)
	    {
	      inform (loc_of_first_default_arg,
		      "...following parameter %P which has a default argument",
		      idx_of_first_default_arg);
	      noted_first_def = true;
	    }
	  TREE_PURPOSE (arg) = error_mark_node;
	}
    }
}
/* Return true if function DECL can be inlined.  This is used to force
   instantiation of methods that might be interesting for inlining.  */
bool
possibly_inlined_p (tree decl)
{
  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);

  if (DECL_UNINLINABLE (decl))
    return false;

  /* When optimizing, we might inline everything (flatten attribute,
     heuristic inlining for size, auto-inlining); without -O, only
     functions declared inline are candidates.  */
  return optimize ? true : DECL_DECLARED_INLINE_P (decl);
}
/* Normally, we can wait until instantiation-time to synthesize DECL.
   However, if DECL is a static data member initialized with a constant
   or a constexpr function, we need it right now because a reference to
   such a data member or a call to such function is not value-dependent.
   For a function that uses auto in the return type, we need to instantiate
   it to find out its type.  For OpenMP user defined reductions, we need
   them instantiated for reduction clauses which inline them by hand
   directly.  */
void
maybe_instantiate_decl (tree decl)
{
  if (!DECL_LANG_SPECIFIC (decl) || !DECL_TEMPLATE_INFO (decl))
    return;

  /* Only the cases above force early instantiation.  */
  bool wanted_now
    = (decl_maybe_constant_var_p (decl)
       || (TREE_CODE (decl) == FUNCTION_DECL
	   && DECL_OMP_DECLARE_REDUCTION_P (decl))
       || undeduced_auto_decl (decl));
  if (!wanted_now
      || DECL_DECLARED_CONCEPT_P (decl)
      || uses_template_parms (DECL_TI_ARGS (decl)))
    return;

  /* Instantiating a function will result in garbage collection.  We
     must treat this situation as if we were within the body of a
     function so as to avoid collecting live data only referenced from
     the stack (such as overload resolution candidates).  */
  ++function_depth;
  instantiate_decl (decl, /*defer_ok=*/false,
		    /*expl_inst_class_mem_p=*/false);
  --function_depth;
}
/* Maybe warn if DECL is deprecated, subject to COMPLAIN.  Returns whether or
   not a warning was emitted.  */

bool
cp_warn_deprecated_use (tree decl, tsubst_flags_t complain)
{
  /* Bail out early when deprecation warnings are unwanted or suppressed.  */
  if (!(complain & tf_warning)
      || !decl
      || deprecated_state == DEPRECATED_SUPPRESS)
    return false;

  if (!TREE_DEPRECATED (decl))
    {
      /* Perhaps this is a deprecated typedef.  */
      if (TYPE_P (decl) && TYPE_NAME (decl))
	decl = TYPE_NAME (decl);
      if (!TREE_DEPRECATED (decl))
	return false;
    }

  /* Don't warn within members of a deprecated type.  */
  if (TYPE_P (decl) && currently_open_class (decl))
    return false;

  /* An implicitly-declared copy member of a class gets the special
     -Wdeprecated-copy treatment; everything else goes through the
     generic deprecation machinery.  */
  const bool implicit_copy_fn
    = (cxx_dialect >= cxx11
       && DECL_P (decl)
       && DECL_ARTIFICIAL (decl)
       && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
       && copy_fn_p (decl));

  if (!implicit_copy_fn)
    return warn_deprecated_use (decl, NULL_TREE);

  bool warned = false;
  if (warn_deprecated_copy
      /* Don't warn about system library classes (c++/86342).  */
      && (!DECL_IN_SYSTEM_HEADER (decl)
	  || global_dc->dc_warn_system_headers))
    {
      auto_diagnostic_group d;
      tree ctx = DECL_CONTEXT (decl);
      tree other = classtype_has_depr_implicit_copy (ctx);
      int opt = (DECL_DESTRUCTOR_P (other)
		 ? OPT_Wdeprecated_copy_dtor
		 : OPT_Wdeprecated_copy);
      warned = warning (opt, "implicitly-declared %qD is deprecated",
			decl);
      if (warned)
	inform (DECL_SOURCE_LOCATION (other),
		"because %qT has user-provided %qD",
		ctx, other);
    }
  return warned;
}
/* Like above, but takes into account outer scopes.  */

void
cp_warn_deprecated_use_scopes (tree scope)
{
  /* Walk outward through enclosing scopes, stopping at the first one
     that triggers a deprecation warning (or at the global namespace).  */
  for (tree s = scope;
       s && s != error_mark_node && s != global_namespace;
       s = TYPE_P (s) ? CP_TYPE_CONTEXT (s) : CP_DECL_CONTEXT (s))
    if (cp_warn_deprecated_use (s))
      return;
}
/* True if DECL or its enclosing scope have unbound template parameters.  */

bool
decl_dependent_p (tree decl)
{
  /* For entities whose dependence is a property of their enclosing
     context, look at that context instead.  */
  const enum tree_code code = TREE_CODE (decl);
  if (DECL_FUNCTION_SCOPE_P (decl)
      || code == CONST_DECL
      || code == USING_DECL
      || code == FIELD_DECL)
    decl = CP_DECL_CONTEXT (decl);

  /* A specialization with dependent template arguments is dependent.  */
  if (tree tinfo = get_template_info (decl))
    if (any_dependent_template_arguments_p (TI_ARGS (tinfo)))
      return true;

  /* So is a lambda's function inside a dependent closure type.  */
  return (LAMBDA_FUNCTION_P (decl)
	  && dependent_type_p (DECL_CONTEXT (decl)));
}
/* Mark DECL (either a _DECL or a BASELINK) as "used" in the program.
   If DECL is a specialization or implicitly declared class member,
   generate the actual definition.  Return false if something goes
   wrong, true otherwise.  */

bool
mark_used (tree decl, tsubst_flags_t complain)
{
  /* If we're just testing conversions or resolving overloads, we
     don't want any permanent effects like forcing functions to be
     output or instantiating templates.  */
  if ((complain & tf_conv))
    return true;

  /* If DECL is a BASELINK for a single function, then treat it just
     like the DECL for the function.  Otherwise, if the BASELINK is
     for an overloaded function, we don't know which function was
     actually used until after overload resolution.  */
  if (BASELINK_P (decl))
    {
      decl = BASELINK_FUNCTIONS (decl);
      if (really_overloaded_fn (decl))
	return true;
      decl = OVL_FIRST (decl);
    }

  if (!DECL_P (decl))
    return true;

  /* Set TREE_USED for the benefit of -Wunused.  */
  TREE_USED (decl) = true;
  /* And for structured bindings also the underlying decl.  */
  if (DECL_DECOMPOSITION_P (decl) && DECL_DECOMP_BASE (decl))
    TREE_USED (DECL_DECOMP_BASE (decl)) = true;

  if (TREE_CODE (decl) == TEMPLATE_DECL)
    return true;

  if (DECL_CLONED_FUNCTION_P (decl))
    TREE_USED (DECL_CLONED_FUNCTION (decl)) = 1;

  /* Mark enumeration types as used.  */
  if (TREE_CODE (decl) == CONST_DECL)
    used_types_insert (DECL_CONTEXT (decl));

  /* Instantiating a function's exception specification can fail; if it
     does, the use of DECL is ill-formed and we report failure.  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && !maybe_instantiate_noexcept (decl, complain))
    return false;

  /* Using a deleted function is an error (or a sorry for the lambda
     conversion case below).  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DELETED_FN (decl))
    {
      if (DECL_ARTIFICIAL (decl)
	  && DECL_CONV_FN_P (decl)
	  && LAMBDA_TYPE_P (DECL_CONTEXT (decl)))
	/* We mark a lambda conversion op as deleted if we can't
	   generate it properly; see maybe_add_lambda_conv_op.  */
	sorry ("converting lambda that uses %<...%> to function pointer");
      else if (complain & tf_error)
	{
	  error ("use of deleted function %qD", decl);
	  if (!maybe_explain_implicit_delete (decl))
	    inform (DECL_SOURCE_LOCATION (decl), "declared here");
	}
      return false;
    }

  /* A block-scope extern declaration is tracked through its alias to
     the namespace-scope entity; redirect to that alias below.  */
  if (VAR_OR_FUNCTION_DECL_P (decl) && DECL_LOCAL_DECL_P (decl))
    {
      if (!DECL_LANG_SPECIFIC (decl))
	/* An unresolved dependent local extern.  */
	return true;
      DECL_ODR_USED (decl) = 1;
      auto alias = DECL_LOCAL_DECL_ALIAS (decl);
      if (!alias || alias == error_mark_node)
	return true;
      /* Process the underlying decl.  */
      decl = alias;
      TREE_USED (decl) = true;
    }

  cp_warn_deprecated_use (decl, complain);

  /* We can only check DECL_ODR_USED on variables or functions with
     DECL_LANG_SPECIFIC set, and these are also the only decls that we
     might need special handling for.  */
  if (!VAR_OR_FUNCTION_DECL_P (decl)
      || DECL_LANG_SPECIFIC (decl) == NULL
      || DECL_THUNK_P (decl))
    {
      if (!decl_dependent_p (decl)
	  && !require_deduced_type (decl, complain))
	return false;
      return true;
    }

  /* We only want to do this processing once.  We don't need to keep trying
     to instantiate inline templates, because unit-at-a-time will make sure
     we get them compiled before functions that want to inline them.  */
  if (DECL_ODR_USED (decl))
    return true;

  /* Normally, we can wait until instantiation-time to synthesize DECL.
     However, if DECL is a static data member initialized with a constant
     or a constexpr function, we need it right now because a reference to
     such a data member or a call to such function is not value-dependent.
     For a function that uses auto in the return type, we need to instantiate
     it to find out its type.  For OpenMP user defined reductions, we need
     them instantiated for reduction clauses which inline them by hand
     directly.  */
  maybe_instantiate_decl (decl);

  /* Reject a use of a constrained function whose constraints are not
     satisfied, diagnosing why when asked to complain.  */
  if (flag_concepts && TREE_CODE (decl) == FUNCTION_DECL
      && !constraints_satisfied_p (decl))
    {
      if (complain & tf_error)
	{
	  auto_diagnostic_group d;
	  error ("use of function %qD with unsatisfied constraints",
		 decl);
	  location_t loc = DECL_SOURCE_LOCATION (decl);
	  inform (loc, "declared here");
	  diagnose_constraints (loc, decl, NULL_TREE);
	}
      return false;
    }

  /* Inside a template, uses are not real odr-uses; defer everything.  */
  if (processing_template_decl || in_template_function ())
    return true;

  /* Check this too in case we're within instantiate_non_dependent_expr.  */
  if (DECL_TEMPLATE_INFO (decl)
      && uses_template_parms (DECL_TI_ARGS (decl)))
    return true;

  if (!require_deduced_type (decl, complain))
    return false;

  if (builtin_pack_fn_p (decl))
    {
      error ("use of built-in parameter pack %qD outside of a template",
	     DECL_NAME (decl));
      return false;
    }

  /* If we don't need a value, then we don't need to synthesize DECL.  */
  if (cp_unevaluated_operand || in_discarded_stmt)
    return true;

  /* From here on DECL is genuinely odr-used; record that before any
     synthesis/instantiation below so re-entry takes the early-out above.  */
  DECL_ODR_USED (decl) = 1;
  if (DECL_CLONED_FUNCTION_P (decl))
    DECL_ODR_USED (DECL_CLONED_FUNCTION (decl)) = 1;

  /* DR 757: A type without linkage shall not be used as the type of a
     variable or function with linkage, unless
   o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
   o the variable or function is not used (3.2 [basic.def.odr]) or is
   defined in the same translation unit.  */
  if (cxx_dialect > cxx98
      && decl_linkage (decl) != lk_none
      && !DECL_EXTERN_C_P (decl)
      && !DECL_ARTIFICIAL (decl)
      && !decl_defined_p (decl)
      && no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false))
    vec_safe_push (no_linkage_decls, decl);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && !DECL_INITIAL (decl)
      && !DECL_ARTIFICIAL (decl)
      && !DECL_PURE_VIRTUAL_P (decl))
    /* Remember it, so we can check it was defined.  */
    note_vague_linkage_fn (decl);

  /* Is it a synthesized method that needs to be synthesized?  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DEFAULTED_FN (decl)
      /* A function defaulted outside the class is synthesized either by
	 cp_finish_decl or instantiate_decl.  */
      && !DECL_DEFAULTED_OUTSIDE_CLASS_P (decl)
      && ! DECL_INITIAL (decl))
    {
      /* Defer virtual destructors so that thunks get the right
	 linkage.  */
      if (DECL_VIRTUAL_P (decl) && !at_eof)
	{
	  note_vague_linkage_fn (decl);
	  return true;
	}

      /* Remember the current location for a function we will end up
	 synthesizing.  Then we can inform the user where it was
	 required in the case of error.  */
      if (decl_remember_implicit_trigger_p (decl))
	DECL_SOURCE_LOCATION (decl) = input_location;

      /* Synthesizing an implicitly defined member function will result in
	 garbage collection.  We must treat this situation as if we were
	 within the body of a function so as to avoid collecting live data
	 on the stack (such as overload resolution candidates).

	 We could just let c_parse_final_cleanups handle synthesizing
	 this function by adding it to deferred_fns, but doing
	 it at the use site produces better error messages.  */
      ++function_depth;
      synthesize_method (decl);
      --function_depth;
      /* If this is a synthesized method we don't need to
	 do the instantiation test below.  */
    }
  else if (VAR_OR_FUNCTION_DECL_P (decl)
	   && DECL_TEMPLATE_INFO (decl)
	   && !DECL_DECLARED_CONCEPT_P (decl)
	   && (!DECL_EXPLICIT_INSTANTIATION (decl)
	       || always_instantiate_p (decl)))
    /* If this is a function or variable that is an instance of some
       template, we now know that we will need to actually do the
       instantiation.  We check that DECL is not an explicit
       instantiation because that is not checked in instantiate_decl.

       We put off instantiating functions in order to improve compile
       times.  Maintaining a stack of active functions is expensive,
       and the inliner knows to instantiate any functions it might
       need.  Therefore, we always try to defer instantiation.  */
    {
      ++function_depth;
      instantiate_decl (decl, /*defer_ok=*/true,
			/*expl_inst_class_mem_p=*/false);
      --function_depth;
    }

  return true;
}
/* Convenience overload: mark DECL used, emitting diagnostics on failure
   (the common case for callers).  */

bool
mark_used (tree decl)
{
  return mark_used (decl, tf_warning_or_error);
}
/* Open the static-initialization function used by vtable verification,
   at one below the maximum reserved init priority ('I' selects an
   initialization, as opposed to destruction, function).  */

tree
vtv_start_verification_constructor_init_function (void)
{
  return start_objects ('I', MAX_RESERVED_INIT_PRIORITY - 1);
}
/* Close FUNCTION_BODY, the vtable-verification init function opened by
   the function above, mark it as a static constructor at the matching
   priority, and return the finished FUNCTION_DECL.  */

tree
vtv_finish_verification_constructor_init_function (tree function_body)
{
  finish_compound_stmt (function_body);

  tree ctor = finish_function (/*inline_p=*/false);
  DECL_STATIC_CONSTRUCTOR (ctor) = 1;
  decl_init_priority_insert (ctor, MAX_RESERVED_INIT_PRIORITY - 1);
  return ctor;
}
#include "gt-cp-decl2.h"
|
convolutiondepthwise_5x5_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const unsigned short* kptr = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out.row<unsigned short>(0);
unsigned short* outptr1 = out.row<unsigned short>(1);
const Mat img0 = bottom_blob.channel(g);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* r5 = img0.row<const unsigned short>(5);
#if __aarch64__
// 4 * 25
uint16x8_t _k00_01 = vld1q_u16(kptr);
uint16x8_t _k02_03 = vld1q_u16(kptr+8);
uint16x8_t _k04_10 = vld1q_u16(kptr+16);
uint16x8_t _k11_12 = vld1q_u16(kptr+24);
uint16x8_t _k13_14 = vld1q_u16(kptr+32);
uint16x8_t _k20_21 = vld1q_u16(kptr+40);
uint16x8_t _k22_23 = vld1q_u16(kptr+48);
uint16x8_t _k24_30 = vld1q_u16(kptr+56);
uint16x8_t _k31_32 = vld1q_u16(kptr+64);
uint16x8_t _k33_34 = vld1q_u16(kptr+72);
uint16x8_t _k40_41 = vld1q_u16(kptr+80);
uint16x8_t _k42_43 = vld1q_u16(kptr+88);
uint16x4_t _k44 = vld1_u16(kptr+96);
#else // __aarch64__
float bias0_data[4];
if (bias)
{
bias0_data[0] = bias[g * 4 + 0];
bias0_data[1] = bias[g * 4 + 1];
bias0_data[2] = bias[g * 4 + 2];
bias0_data[3] = bias[g * 4 + 3];
}
else
{
bias0_data[0] = 0.f;
bias0_data[1] = 0.f;
bias0_data[2] = 0.f;
bias0_data[3] = 0.f;
}
const float* bias0_data_ptr = bias0_data;
#endif // __aarch64__
int i = 0;
#if __aarch64__
for (; i+1 < outh; i+=2)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"// r10 r11 r12 r13
"shll2 v14.4s, %18.8h, #16 \n"
"mov v24.16b, %29.16b \n"// sum00
"mov v25.16b, %29.16b \n"// sum01
"mov v26.16b, %29.16b \n"// sum02
"mov v27.16b, %29.16b \n"// sum03
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v28.16b, %29.16b \n"// sum10
"mov v29.16b, %29.16b \n"// sum11
"mov v30.16b, %29.16b \n"// sum12
"mov v31.16b, %29.16b \n"// sum13
"shll v15.4s, %16.4h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n"// r14 r15 r16 r17
"fmla v27.4s, v14.4s, v19.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %19.8h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %17.4h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll2 v14.4s, %20.8h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n"// r20 r21 r22 r23
"fmla v31.4s, v15.4s, v22.4s \n"
"shll v15.4s, %18.4h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n"// r24 r25 r26 r27
"fmla v27.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %21.8h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %19.4h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll2 v14.4s, %22.8h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v15.4s, %20.4h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"// r30 r31 r32 r33
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll2 v14.4s, %23.8h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll v15.4s, %21.4h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n"// r34 r35 r36 r37
"fmla v27.4s, v14.4s, v19.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %24.8h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %22.4h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll v14.4s, %25.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll2 v14.4s, %25.8h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"// r40 r41 r42 r43
"fmla v31.4s, v15.4s, v22.4s \n"
"shll v15.4s, %23.4h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll v14.4s, %26.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%6] \n"// r44 r45 r46 r47
"fmla v27.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %24.4h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll v14.4s, %27.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %24.8h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v15.4s, %25.4h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll v14.4s, %28.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"// r00 r01 r02 r03
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %25.8h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n"// r04 r05 r06 r07
"fmla v27.4s, v14.4s, v19.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v24.4s, v15.4s, v17.4s \n"
"fmla v25.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v15.4s, v19.4s \n"
"fmla v27.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v24.4s, v15.4s, v19.4s \n"
"fmla v25.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"// r50 r51 r52 r53
"fmla v27.4s, v15.4s, v22.4s \n"
"shll v15.4s, %26.4h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%7] \n"// r54 r55 r56 r57
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %27.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"fmla v31.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v15.4s, %28.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k00_01), // %16
"w"(_k02_03), // %17
"w"(_k04_10), // %18
"w"(_k11_12), // %19
"w"(_k13_14), // %20
"w"(_k20_21), // %21
"w"(_k22_23), // %22
"w"(_k24_30), // %23
"w"(_k31_32), // %24
"w"(_k33_34), // %25
"w"(_k40_41), // %26
"w"(_k42_43), // %27
"w"(_k44), // %28
"w"(_bias0) // %29
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; j+1 < outw; j+=2)
{
asm volatile(
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.4h, v17.4h}, [%3], #16 \n"// r10 r11
"shll2 v14.4s, %18.8h, #16 \n"
"mov v28.16b, %29.16b \n"// sum00
"mov v29.16b, %29.16b \n"// sum01
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v30.16b, %29.16b \n"// sum10
"mov v31.16b, %29.16b \n"// sum11
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n"// r12 r13 r14 r15
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v15.4s, %16.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %19.8h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %17.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4h, v17.4h}, [%4], #16 \n"// r20 r21
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %20.8h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %18.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n"// r22 r23 r24 r25
"fmla v28.4s, v14.4s, v16.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %21.8h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %19.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4h, v17.4h}, [%5], #16 \n"// r30 r31
"fmla v29.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %22.8h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %20.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll2 v14.4s, %23.8h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n"// r32 r33 r34 r35
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v15.4s, %21.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %24.8h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %22.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v16.4h, v17.4h}, [%6], #16 \n"// r40 r41
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %25.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %25.8h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %23.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %26.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%6] \n"// r42 r43 r44 r45
"fmla v28.4s, v14.4s, v16.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %24.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %27.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %24.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.4h, v17.4h}, [%2], #16 \n"// r00 r01
"fmla v29.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %25.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v22.4h, v23.4h}, [%7], #16 \n"// r50 r51
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %28.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %25.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n"// r02 r03 r04 r05
"shll v23.4s, v23.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v15.4s, %26.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7] \n"// r52 r53 r54 r55
"shll2 v14.4s, %16.8h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %26.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll v15.4s, %27.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %17.8h, #16 \n"
"fmla v30.4s, v15.4s, v24.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"shll2 v15.4s, %27.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v30.4s, v15.4s, v25.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v26.4s \n"
"shll v15.4s, %28.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v30.4s, v15.4s, v26.4s \n"
"fmla v31.4s, v15.4s, v27.4s \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h}, [%0], #16 \n"
"st1 {v30.4h, v31.4h}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k00_01), // %16
"w"(_k02_03), // %17
"w"(_k04_10), // %18
"w"(_k11_12), // %19
"w"(_k13_14), // %20
"w"(_k20_21), // %21
"w"(_k22_23), // %22
"w"(_k24_30), // %23
"w"(_k31_32), // %24
"w"(_k33_34), // %25
"w"(_k40_41), // %26
"w"(_k42_43), // %27
"w"(_k44), // %28
"w"(_bias0) // %29
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
// Column tail for the two-output-row pass: one pack4 output column per
// iteration, written to both outptr0 and outptr1. Reads six input rows
// (r0..r5 via %2..%7) -- consistent with a stride-1 5x5 window evaluated at
// two vertically adjacent positions. The 25 kernel taps arrive pre-loaded in
// vector operands %16..%28 (two taps per register, e.g. _k00_01; the odd tap
// _k44 alone in %28). Data is bf16: "shll ..., #16" widens each 16-bit lane
// to 32 bits, which reinterprets bfloat16 as fp32; "shrn ..., #16" truncates
// fp32 back to bf16 before the store. v30/v31 start from the bias (%29).
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v16.4h}, [%3], #8 \n"// r10
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n"// r11 r12 r13 r14
"mov v30.16b, %29.16b \n"// sum00
"mov v31.16b, %29.16b \n"// sum10
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll2 v14.4s, %18.8h, #16 \n"
"shll v15.4s, %16.4h, #16 \n"
// v28/v29 are started with fmul (no bias) and folded into v30/v31 at the
// end with fadd; this splits each row sum across two accumulator chains.
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmul v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %19.8h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %17.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %20.8h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %18.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v16.4h}, [%4], #8 \n"// r20
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n"// r21 r22 r23 r24
"shll2 v15.4s, %18.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll2 v14.4s, %21.8h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll v15.4s, %19.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %22.8h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %20.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %23.8h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v16.4h}, [%5], #8 \n"// r30
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n"// r31 r32 r33 r34
"shll v15.4s, %21.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %24.8h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %22.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %25.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %25.8h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %23.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v14.4s, %26.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%6, #64] \n"
"ld1 {v16.4h}, [%6], #8 \n"// r40
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%6] \n"// r41 r42 r43 r44
"shll2 v15.4s, %23.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll v15.4s, %24.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %27.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %24.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %25.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %28.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %25.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
// Top row (r0) feeds only the first output row, bottom row (r5) only the
// second -- the 5x5 windows for the two output rows overlap on r1..r4.
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v16.4h}, [%2], #8 \n"// r00
"prfm pldl1keep, [%7, #64] \n"
"ld1 {v21.4h}, [%7], #8 \n"// r50
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [%7] \n"// r51 r52 r53 r54
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n"// r01 r02 r03 r04
"shll v15.4s, %26.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll2 v14.4s, %16.8h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %26.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"shll v15.4s, %27.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %17.8h, #16 \n"
"fmla v29.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %27.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll v15.4s, %28.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v25.4s \n"
// Merge the two accumulator chains, narrow fp32 -> bf16, store one pack4
// value per output row.
"fadd v30.4s, v30.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v30.4h}, [%0], #8 \n"
"st1 {v31.4h}, [%1], #8 \n"
// Row pointers are in/out operands: post-increment addressing advances
// them, so they are declared as outputs tied ("0".."7") to their inputs.
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k00_01), // %16
"w"(_k02_03), // %17
"w"(_k04_10), // %18
"w"(_k11_12), // %19
"w"(_k13_14), // %20
"w"(_k20_21), // %21
"w"(_k22_23), // %22
"w"(_k24_30), // %23
"w"(_k31_32), // %24
"w"(_k33_34), // %25
"w"(_k40_41), // %26
"w"(_k42_43), // %27
"w"(_k44), // %28
"w"(_bias0) // %29
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
// End of the two-row pass: the asm above already advanced each pointer by
// what it consumed, so these steps re-align for the next iteration.
// Inputs: skip the remaining 4 pack4 pixels of the current row (the 5x5
// window reads 4 columns beyond outw -- presumably w == outw + 4 here;
// TODO confirm against the enclosing function) plus one extra full row,
// since two output rows are produced per pass.
r0 += 4*4 + w*4;
r1 += 4*4 + w*4;
r2 += 4*4 + w*4;
r3 += 4*4 + w*4;
r4 += 4*4 + w*4;
r5 += 4*4 + w*4;
// Outputs: each pointer jumps over the row the other one just wrote
// (NOTE(review): assumes outptr1 trails outptr0 by exactly one output row
// -- confirm where the pointers are initialised, outside this view).
outptr0 += outw*4;
outptr1 += outw*4;
}
#endif // __aarch64__
for (; i < outh; i++)
{
int j = 0;
// Single-output-row path, four pack4 output columns per iteration.
// Five input rows r0..r4 feed one row of the stride-1 5x5 convolution.
// Data is bf16 throughout: "shll/vshll ... #16" widens 16-bit lanes to
// 32 bits (bfloat16 -> fp32 reinterpretation); "shrn/vshrn ... #16"
// truncates back to bf16 for the store.
for (; j+3 < outw; j+=4)
{
#if __aarch64__
// AArch64: all 25 kernel taps live in vector operands %12..%24 (two
// taps per register, e.g. _k00_01; _k44 alone in %24); the bias is %25.
// v28..v31 accumulate the four output columns.
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"// r00 r01 r02 r03
"shll v14.4s, %12.4h, #16 \n"
"mov v28.16b, %25.16b \n"// sum00
"mov v29.16b, %25.16b \n"// sum01
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v30.16b, %25.16b \n"// sum02
"mov v31.16b, %25.16b \n"// sum03
"shll2 v15.4s, %12.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1] \n"// r04 r05 r06 r07
"fmla v31.4s, v14.4s, v19.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"fmla v31.4s, v14.4s, v21.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"// r10 r11 r12 r13
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %15.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n"// r14 r15 r16 r17
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"fmla v31.4s, v14.4s, v20.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"// r20 r21 r22 r23
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n"// r24 r25 r26 r27
"fmla v31.4s, v14.4s, v19.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"fmla v31.4s, v14.4s, v21.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n"// r30 r31 r32 r33
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n"// r34 r35 r36 r37
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"fmla v31.4s, v14.4s, v20.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"// r40 r41 r42 r43
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n"// r44 r45 r46 r47
"fmla v31.4s, v14.4s, v19.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"fmla v31.4s, v14.4s, v21.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v23.4s \n"
// Narrow the four fp32 column sums back to bf16 and store them together.
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
#else // __aarch64__
// ARMv7: too few q registers to pin the kernel, so taps are streamed
// from kptr (%7) four-at-a-time and widened on the fly; the bias vector
// is (re)loaded as fp32 from bias0_data_ptr (%1) each iteration. kptr is
// rewound at the end ("sub %7, %7, #192") so the next iteration re-reads
// the same 5x5 kernel. q12..q15 accumulate the four output columns.
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n"// r00 r01 r02 r03
"vshll.u16 q8, d20, #16 \n"// k00
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vmov q13, q12 \n"// sum0 sum1
"vmov q14, q12 \n"
"vshll.u16 q9, d21, #16 \n"// k01
"vmov q15, q12 \n"// sum2 sum3
"vmla.f32 q12, q8, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q8, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, q2 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64] \n"// r04 r05 r06 r07
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q10, d22, #16 \n"// k02
"vmla.f32 q12, q9, q1 \n"
"vmla.f32 q13, q9, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q9, q3 \n"
"vmla.f32 q15, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k03
"vmla.f32 q12, q10, q2 \n"
"vmla.f32 q13, q10, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q5 \n"
"vshll.u16 q10, d16, #16 \n"// k04
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n"// r10 r11 r12 r13
"vmla.f32 q15, q11, q6 \n"
"vshll.u16 q11, d17, #16 \n"// k10
"vmla.f32 q12, q10, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q10, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q10, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q10, q7 \n"
"vshll.u16 q8, d18, #16 \n"// k11
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q11, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q11, q2 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64] \n"// r14 r15 r16 r17
"vmla.f32 q15, q11, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n"// k12
"vmla.f32 q12, q8, q1 \n"
"vmla.f32 q13, q8, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q8, q3 \n"
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q8, d20, #16 \n"// k13
"vmla.f32 q12, q9, q2 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q9, q4 \n"
"vmla.f32 q15, q9, q5 \n"
"vshll.u16 q9, d21, #16 \n"// k14
"vmla.f32 q12, q8, q3 \n"
"vmla.f32 q13, q8, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q8, q5 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n"// r20 r21 r22 r23
"vmla.f32 q15, q8, q6 \n"
"vshll.u16 q10, d22, #16 \n"// k20
"vmla.f32 q12, q9, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q9, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q9, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q9, q7 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k21
"vmla.f32 q12, q10, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q10, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q2 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64] \n"// r24 r25 r26 r27
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q10, d16, #16 \n"// k22
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q11, q3 \n"
"vmla.f32 q15, q11, q4 \n"
"vshll.u16 q11, d17, #16 \n"// k23
"vmla.f32 q12, q10, q2 \n"
"vmla.f32 q13, q10, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q5 \n"
"vshll.u16 q8, d18, #16 \n"// k24
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n"// r30 r31 r32 r33
"vmla.f32 q15, q11, q6 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n"// k30
"vmla.f32 q12, q8, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q8, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q8, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q8, q7 \n"
"vshll.u16 q8, d20, #16 \n"// k31
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q9, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, q2 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64] \n"// r34 r35 r36 r37
"vmla.f32 q15, q9, q3 \n"
"vshll.u16 q9, d21, #16 \n"// k32
"vmla.f32 q12, q8, q1 \n"
"vmla.f32 q13, q8, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q8, q3 \n"
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q10, d22, #16 \n"// k33
"vmla.f32 q12, q9, q2 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q9, q4 \n"
"vmla.f32 q15, q9, q5 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q3 \n"
"vshll.u16 q11, d23, #16 \n"// k34
"vmla.f32 q13, q10, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q10, q5 \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n"// r40 r41 r42 r43
"vmla.f32 q15, q10, q6 \n"
"vshll.u16 q10, d16, #16 \n"// k40
"vmla.f32 q12, q11, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q11, q7 \n"
"vshll.u16 q11, d17, #16 \n"// k41
"vmla.f32 q12, q10, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q10, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q2 \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64] \n"// r44 r45 r46 r47
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q8, d18, #16 \n"// k42
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q11, q3 \n"
"vmla.f32 q15, q11, q4 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vmla.f32 q12, q8, q2 \n"
"vshll.u16 q9, d19, #16 \n"// k43
"vmla.f32 q13, q8, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q5 \n"
"vshll.u16 q8, d20, #16 \n"// k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q9, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q9, q5 \n"
"vmla.f32 q15, q9, q6 \n"
"vmla.f32 q12, q8, q4 \n"
"vmla.f32 q13, q8, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q8, q6 \n"
"vmla.f32 q15, q8, q7 \n"
"sub %7, %7, #192 \n"// kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
// Single-output-row path, two pack4 output columns per iteration.
// Same computation as the j+=4 loop above, halved: five input rows
// r0..r4, bf16 widened to fp32 with shll/vshll #16 and narrowed back
// with shrn/vshrn #16.
for (; j+1 < outw; j+=2)
{
#if __aarch64__
// AArch64: kernel taps in %12..%24, bias in %25. The two column sums are
// split across accumulator pairs (v28/v30 and v29/v31, started with fmul
// vs. bias-seeded fmla) and merged with fadd before the narrowing store.
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4h, v17.4h}, [%1], #16 \n"// r00 r01
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%1] \n"// r02 r03 r04 r05
"shll v14.4s, %12.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v30.16b, %25.16b \n"// sum01
"mov v31.16b, %25.16b \n"// sum02
"shll2 v15.4s, %12.8h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.4h, v17.4h}, [%2], #16 \n"// r10 r11
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n"// r12 r13 r14 r15
"shll v14.4s, %15.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.4h, v17.4h}, [%3], #16 \n"// r20 r21
"shll v14.4s, %16.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n"// r22 r23 r24 r25
"shll2 v15.4s, %17.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4h, v17.4h}, [%4], #16 \n"// r30 r31
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n"// r32 r33 r34 r35
"shll v14.4s, %20.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4h, v17.4h}, [%5], #16 \n"// r40 r41
"shll v14.4s, %21.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n"// r42 r43 r44 r45
"shll2 v15.4s, %22.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v21.4s \n"
// Merge accumulator pairs, narrow fp32 -> bf16, store both columns.
"fadd v30.4s, v30.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v30.4h, v31.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else // __aarch64__
// ARMv7: taps streamed from kptr (%7) and rewound at the end
// ("sub %7, %7, #192"); bias reloaded as fp32 from bias0_data_ptr (%1).
// Column sums split across q12/q14 and q13/q15, merged with vadd.f32.
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n"// r00 r01
"vshll.u16 q8, d20, #16 \n"// k00
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d8-d11}, [%2 :64] \n"// r02 r03 r04 r05
"vshll.u16 q0, d2, #16 \n"
"vmov q13, q12 \n"// sum0 sum1
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q9, d21, #16 \n"// k01
"vmul.f32 q14, q8, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmul.f32 q15, q8, q1 \n"
"vshll.u16 q10, d22, #16 \n"// k02
"vmla.f32 q12, q9, q1 \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n"// r10 r11
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k03
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q10, d16, #16 \n"// k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, q4 \n"
"vshll.u16 q11, d17, #16 \n"// k10
"vmla.f32 q14, q10, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q15, q10, q5 \n"
"pld [%3, #256] \n"
"vld1.u16 {d8-d11}, [%3 :64] \n"// r12 r13 r14 r15
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d18, #16 \n"// k11
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, q1 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n"// k12
"vmla.f32 q14, q8, q1 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n"// r20 r21
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q8, d20, #16 \n"// k13
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q9, d21, #16 \n"// k14
"vmla.f32 q14, q8, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q10, d22, #16 \n"// k20
"vmla.f32 q12, q9, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, q5 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"pld [%4, #256] \n"
"vld1.u16 {d8-d11}, [%4 :64] \n"// r22 r23 r24 r25
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q11, d23, #16 \n"// k21
"vmla.f32 q14, q10, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q15, q10, q1 \n"
"vshll.u16 q10, d16, #16 \n"// k22
"vmla.f32 q12, q11, q1 \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n"// r30 r31
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q11, d17, #16 \n"// k23
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q8, d18, #16 \n"// k24
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n"// k30
"vmla.f32 q14, q8, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q15, q8, q5 \n"
"pld [%5, #256] \n"
"vld1.u16 {d8-d11}, [%5 :64] \n"// r32 r33 r34 r35
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d20, #16 \n"// k31
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, q1 \n"
"vshll.u16 q9, d21, #16 \n"// k32
"vmla.f32 q14, q8, q1 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n"// r40 r41
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q10, d22, #16 \n"// k33
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k34
"vmla.f32 q14, q10, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q10, d16, #16 \n"// k40
"vmla.f32 q12, q11, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"pld [%6, #256] \n"
"vld1.u16 {d8-d11}, [%6 :64] \n"// r42 r43 r44 r45
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q11, d17, #16 \n"// k41
"vmla.f32 q14, q10, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q15, q10, q1 \n"
"vshll.u16 q8, d18, #16 \n"// k42
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, q2 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vshll.u16 q9, d19, #16 \n"// k43
"vmla.f32 q14, q8, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q8, d20, #16 \n"// k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q9, q4 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q5 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #192 \n"// kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v16.4h}, [%1], #8 \n"// r00
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%1] \n"// r01 r02 r03 r04
"shll v14.4s, %12.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"mov v31.16b, %25.16b \n"// sum01
"shll2 v15.4s, %12.8h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmul v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmul v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v16.4h}, [%2], #8 \n"// r10
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n"// r11 r12 r13 r14
"shll v14.4s, %15.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v16.4h}, [%3], #8 \n"// r20
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n"// r21 r22 r23 r24
"shll2 v15.4s, %17.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v16.4h}, [%4], #8 \n"// r30
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n"// r31 r32 r33 r34
"shll v14.4s, %20.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v16.4h}, [%5], #8 \n"// r40
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n"// r41 r42 r43 r44
"shll2 v15.4s, %22.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fadd v29.4s, v29.4s, v30.4s \n"
"fadd v31.4s, v31.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v31.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"// sum0
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n"// r00
"vshll.u16 q8, d20, #16 \n"// k00
"pld [%2, #256] \n"
"vld1.u16 {d6-d9}, [%2 :64] \n"// r01 r02 r03 r04
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q9, d21, #16 \n"// k01
"vshll.u16 q1, d6, #16 \n"
"vmul.f32 q13, q8, q0 \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3 :64]! \n"// r10
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q10, d22, #16 \n"// k02
"vmul.f32 q14, q9, q1 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q11, d23, #16 \n"// k03
"vmul.f32 q15, q10, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q10, d16, #16 \n"// k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q11, d17, #16 \n"// k10
"vmla.f32 q13, q10, q4 \n"
"pld [%3, #256] \n"
"vld1.u16 {d6-d9}, [%3 :64] \n"// r11 r12 r13 r14
"vshll.u16 q8, d18, #16 \n"// k11
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q14, q11, q0 \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4 :64]! \n"// r20
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q9, d19, #16 \n"// k12
"vmla.f32 q15, q8, q1 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q8, d20, #16 \n"// k13
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q9, d21, #16 \n"// k14
"vmla.f32 q13, q8, q3 \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q10, d22, #16 \n"// k20
"vmla.f32 q14, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"pld [%4, #256] \n"
"vld1.u16 {d6-d9}, [%4 :64] \n"// r21 r22 r23 r24
"vshll.u16 q11, d23, #16 \n"// k21
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q15, q10, q0 \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n"// r30
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q10, d16, #16 \n"// k22
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q11, d17, #16 \n"// k23
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q8, d18, #16 \n"// k24
"vmla.f32 q14, q11, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q9, d19, #16 \n"// k30
"vmla.f32 q15, q8, q4 \n"
"pld [%5, #256] \n"
"vld1.u16 {d6-d9}, [%5 :64] \n"// r31 r32 r33 r34
"vshll.u16 q8, d20, #16 \n"// k31
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q12, q9, q0 \n"
"pld [%6, #64] \n"
"vld1.u16 {d1}, [%6 :64]! \n"// r40
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q9, d21, #16 \n"// k32
"vmla.f32 q13, q8, q1 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q10, d22, #16 \n"// k33
"vmla.f32 q14, q9, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q11, d23, #16 \n"// k34
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q10, d16, #16 \n"// k40
"vmla.f32 q12, q11, q4 \n"
"pld [%6, #256] \n"
"vld1.u16 {d6-d9}, [%6 :64] \n"// r41 r42 r43 r44
"vshll.u16 q11, d17, #16 \n"// k41
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q13, q10, q0 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q8, d18, #16 \n"// k42
"vmla.f32 q14, q11, q1 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q9, d19, #16 \n"// k43
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q8, d20, #16 \n"// k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q8, q4 \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"vadd.f32 q12, q12, q14 \n"
"sub %7, %7, #192 \n"// kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
r0 += 4*4;
r1 += 4*4;
r2 += 4*4;
r3 += 4*4;
r4 += 4*4;
}
}
}
static void convdw5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
#if __aarch64__
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
#endif // __aarch64__
const unsigned short* kptr = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
#if __aarch64__
// 4 * 25
uint16x8_t _k00_01 = vld1q_u16(kptr);
uint16x8_t _k02_03 = vld1q_u16(kptr+8);
uint16x8_t _k04_10 = vld1q_u16(kptr+16);
uint16x8_t _k11_12 = vld1q_u16(kptr+24);
uint16x8_t _k13_14 = vld1q_u16(kptr+32);
uint16x8_t _k20_21 = vld1q_u16(kptr+40);
uint16x8_t _k22_23 = vld1q_u16(kptr+48);
uint16x8_t _k24_30 = vld1q_u16(kptr+56);
uint16x8_t _k31_32 = vld1q_u16(kptr+64);
uint16x8_t _k33_34 = vld1q_u16(kptr+72);
uint16x8_t _k40_41 = vld1q_u16(kptr+80);
uint16x8_t _k42_43 = vld1q_u16(kptr+88);
uint16x4_t _k44 = vld1_u16(kptr+96);
#else // __aarch64__
float bias0_data[4];
if (bias)
{
bias0_data[0] = bias[g * 4 + 0];
bias0_data[1] = bias[g * 4 + 1];
bias0_data[2] = bias[g * 4 + 2];
bias0_data[3] = bias[g * 4 + 3];
}
else
{
bias0_data[0] = 0.f;
bias0_data[1] = 0.f;
bias0_data[2] = 0.f;
bias0_data[3] = 0.f;
}
const float* bias0_data_ptr = bias0_data;
#endif // __aarch64__
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"// r04 r05 r06 r07
"shll v14.4s, %12.4h, #16 \n"
"mov v28.16b, %25.16b \n"// sum00
"mov v29.16b, %25.16b \n"// sum01
"mov v30.16b, %25.16b \n"// sum02
"mov v31.16b, %25.16b \n"// sum03
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%1] \n"// r08 r09 r010
"shll2 v15.4s, %12.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v24.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"// r10 r11 r12 r13
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"// r14 r15 r16 r17
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v14.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v14.4s, v26.4s \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%2] \n"// r18 r19 r110
"shll v14.4s, %15.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"// r20 r21 r22 r23
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v14.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v14.4s, v25.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"// r24 r25 r26 r27
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v15.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v26.4s \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%3] \n"// r28 r29 r210
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v24.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n"// r30 r31 r32 r33
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4], #32 \n"// r34 r35 r36 r37
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v14.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v14.4s, v26.4s \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%4] \n"// r38 r39 r310
"shll v14.4s, %20.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"// r40 r41 r42 r43
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v14.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v14.4s, v25.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n"// r44 r45 r46 r47
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v15.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v26.4s \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n"// r48 r49 r410
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v24.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v14.4s, v24.4s \n"
"fmla v31.4s, v14.4s, v26.4s \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"vmov q13, q12 \n"// sum0 sum1
"vshll.u16 q8, d20, #16 \n"// k00
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n"// r00 r01 r02 r03
"vmov q14, q12 \n"
"vmov q15, q12 \n"// sum2 sum3
"vshll.u16 q9, d21, #16 \n"// k01
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64]! \n"// r04 r05 r06 r07
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q12, q8, q0 \n"
"vmla.f32 q13, q8, q2 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q6 \n"
"vshll.u16 q10, d22, #16 \n"// k02
"vmla.f32 q12, q9, q1 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q9, q5 \n"
"vmla.f32 q15, q9, q7 \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n"// r08 r09
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q2 \n"
"vshll.u16 q11, d23, #16 \n"// k03
"vmla.f32 q13, q10, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q14, q10, q6 \n"
"vmla.f32 q15, q10, q0 \n"
"vshll.u16 q10, d16, #16 \n"// k04
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q14, q11, q7 \n"
"vmla.f32 q15, q11, q1 \n"
"pld [%2, #64] \n"
"vld1.u16 {d5}, [%2 :64] \n"// r010
"vmla.f32 q12, q10, q4 \n"
"vshll.u16 q11, d17, #16 \n"// k10
"vmla.f32 q13, q10, q6 \n"
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q14, q10, q0 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64]! \n"// r10 r11 r12 r13
"vmla.f32 q15, q10, q2 \n"
"vshll.u16 q8, d18, #16 \n"// k11
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n"// r14 r15 r16 r17
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q12, q11, q4 \n"
"vmla.f32 q13, q11, q6 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q14, q11, q0 \n"
"vmla.f32 q15, q11, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q12, q8, q5 \n"
"vshll.u16 q9, d19, #16 \n"// k12
"vmla.f32 q13, q8, q7 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, q1 \n"
"vmla.f32 q15, q8, q3 \n"
"pld [%3, #128] \n"
"vld1.u16 {d10-d11}, [%3 :64]! \n"// r18 r19
"vmla.f32 q12, q9, q6 \n"
"vshll.u16 q8, d20, #16 \n"// k13
"vmla.f32 q13, q9, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q14, q9, q2 \n"
"vmla.f32 q15, q9, q4 \n"
"vshll.u16 q9, d21, #16 \n"// k14
"vmla.f32 q12, q8, q7 \n"
"vmla.f32 q13, q8, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q14, q8, q3 \n"
"vmla.f32 q15, q8, q5 \n"
"pld [%3, #64] \n"
"vld1.u16 {d13}, [%3 :64] \n"// r110
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q10, d22, #16 \n"// k20
"vmla.f32 q13, q9, q2 \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q14, q9, q4 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n"// r20 r21 r22 r23
"vmla.f32 q15, q9, q6 \n"
"vshll.u16 q11, d23, #16 \n"// k21
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n"// r24 r25 r26 r27
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q0 \n"
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q6 \n"
"vshll.u16 q10, d16, #16 \n"// k22
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q3 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"vmla.f32 q15, q11, q7 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n"// r28 r29
"vmla.f32 q12, q10, q2 \n"
"vshll.u16 q11, d17, #16 \n"// k23
"vmla.f32 q13, q10, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q14, q10, q6 \n"
"vmla.f32 q15, q10, q0 \n"
"vshll.u16 q8, d18, #16 \n"// k24
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q14, q11, q7 \n"
"vmla.f32 q15, q11, q1 \n"
"pld [%4, #64] \n"
"vld1.u16 {d5}, [%4 :64] \n"// r210
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q12, q8, q4 \n"
"vshll.u16 q9, d19, #16 \n"// k30
"vmla.f32 q13, q8, q6 \n"
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q14, q8, q0 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64]! \n"// r30 r31 r32 r33
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q8, d20, #16 \n"// k31
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n"// r34 r35 r36 r37
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q12, q9, q4 \n"
"vmla.f32 q13, q9, q6 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q14, q9, q0 \n"
"vmla.f32 q15, q9, q2 \n"
"vshll.u16 q9, d21, #16 \n"// k32
"vmla.f32 q12, q8, q5 \n"
"vmla.f32 q13, q8, q7 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, q1 \n"
"vmla.f32 q15, q8, q3 \n"
"pld [%5, #128] \n"
"vld1.u16 {d10-d11}, [%5 :64]! \n"// r38 r39
"vmla.f32 q12, q9, q6 \n"
"vshll.u16 q10, d22, #16 \n"// k33
"vmla.f32 q13, q9, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q14, q9, q2 \n"
"vmla.f32 q15, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q7 \n"
"vshll.u16 q11, d23, #16 \n"// k34
"vmla.f32 q13, q10, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q14, q10, q3 \n"
"vmla.f32 q15, q10, q5 \n"
"pld [%5, #64] \n"
"vld1.u16 {d13}, [%5 :64] \n"// r310
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q10, d16, #16 \n"// k40
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q14, q11, q4 \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n"// r40 r41 r42 r43
"vmla.f32 q15, q11, q6 \n"
"vshll.u16 q11, d17, #16 \n"// k41
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64]! \n"// r44 r45 r46 r47
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q12, q10, q0 \n"
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q6 \n"
"vshll.u16 q8, d18, #16 \n"// k42
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q3 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vmla.f32 q15, q11, q7 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n"// r48 r49
"vmla.f32 q12, q8, q2 \n"
"vshll.u16 q9, d19, #16 \n"// k43
"vmla.f32 q13, q8, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q14, q8, q6 \n"
"vmla.f32 q15, q8, q0 \n"
"vshll.u16 q8, d20, #16 \n"// k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q9, q5 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q14, q9, q7 \n"
"vmla.f32 q15, q9, q1 \n"
"pld [%6, #64] \n"
"vld1.u16 {d5}, [%6 :64] \n"// r410
"vmla.f32 q12, q8, q4 \n"
"vmla.f32 q13, q8, q6 \n"
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q14, q8, q0 \n"
"vmla.f32 q15, q8, q2 \n"
"sub %7, %7, #192 \n"// kptr -= 24 * 4;
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j+1 < outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%1] \n"// r04 r05 r06
"shll v14.4s, %12.4h, #16 \n"
"shll2 v15.4s, %12.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"mov v30.16b, %25.16b \n"// sum00
"mov v31.16b, %25.16b \n"// sum01
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"// r10 r11 r12 r13
"shll v22.4s, v22.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%2] \n"// r14 r15 r16
"shll v14.4s, %15.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"// r20 r21 r22 r23
"shll v22.4s, v22.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%3] \n"// r24 r25 r26
"shll2 v15.4s, %17.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n"// r30 r31 r32 r33
"shll v22.4s, v22.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n"// r34 r35 r36
"shll v14.4s, %20.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"// r40 r41 r42 r43
"shll v22.4s, v22.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%5] \n"// r44 r45 r46
"shll2 v15.4s, %22.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"fadd v30.4s, v30.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v30.4h, v31.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n"// r00 r01 r02 r03
"vshll.u16 q8, d20, #16 \n"// k00
"pld [%2, #256] \n"
"vld1.u16 {d10-d12}, [%2 :64] \n"// r04 r05 r06
"vmov q13, q12 \n"// sum0 sum1
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q9, d21, #16 \n"// k01
"vmul.f32 q14, q8, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmul.f32 q15, q8, q2 \n"
"vshll.u16 q10, d22, #16 \n"// k02
"vmla.f32 q12, q9, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q11, d23, #16 \n"// k03
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q10, d16, #16 \n"// k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q11, d17, #16 \n"// k10
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q6 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n"// r10 r11 r12 r13
"vshll.u16 q8, d18, #16 \n"// k11
"pld [%3, #256] \n"
"vld1.u16 {d10-d12}, [%3 :64] \n"// r14 r15 r16
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q14, q8, q1 \n"
"vshll.u16 q9, d19, #16 \n"// k12
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q8, d20, #16 \n"// k13
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, q4 \n"
"vshll.u16 q9, d21, #16 \n"// k14
"vmla.f32 q14, q8, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q15, q8, q5 \n"
"vshll.u16 q10, d22, #16 \n"// k20
"vmla.f32 q12, q9, q4 \n"
"vmla.f32 q13, q9, q6 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n"// r20 r21 r22 r23
"vshll.u16 q11, d23, #16 \n"// k21
"pld [%4, #256] \n"
"vld1.u16 {d10-d12}, [%4 :64] \n"// r24 r25 r26
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q0 \n"
"vmla.f32 q15, q10, q2 \n"
"vshll.u16 q10, d16, #16 \n"// k22
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, q3 \n"
"vshll.u16 q11, d17, #16 \n"// k23
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q8, d18, #16 \n"// k24
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q14, q8, q4 \n"
"vshll.u16 q9, d19, #16 \n"// k30
"vmla.f32 q15, q8, q6 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n"// r30 r31 r32 r33
"vshll.u16 q8, d20, #16 \n"// k31
"pld [%5, #256] \n"
"vld1.u16 {d10-d12}, [%5 :64] \n"// r34 r35 r36
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, q2 \n"
"vshll.u16 q9, d21, #16 \n"// k32
"vmla.f32 q14, q8, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q10, d22, #16 \n"// k33
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q14, q10, q3 \n"
"vshll.u16 q11, d23, #16 \n"// k34
"vmla.f32 q15, q10, q5 \n"
"vshll.u16 q10, d16, #16 \n"// k40
"vmla.f32 q12, q11, q4 \n"
"vmla.f32 q13, q11, q6 \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n"// r40 r41 r42 r43
"vshll.u16 q11, d17, #16 \n"// k41
"pld [%6, #256] \n"
"vld1.u16 {d10-d12}, [%6 :64] \n"// r44 r45 r46
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q10, q2 \n"
"vshll.u16 q8, d18, #16 \n"// k42
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, q3 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vmla.f32 q14, q8, q2 \n"
"vshll.u16 q9, d19, #16 \n"// k43
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q8, d20, #16 \n"// k44
"vmla.f32 q12, q9, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q9, q5 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q6 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #192 \n"// kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4h, v17.4h}, [%1], #16 \n"// r00 r01
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%1] \n"// r02 r03 r04
"shll v14.4s, %12.4h, #16 \n"
"mov v31.16b, %25.16b \n"// sum00
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll2 v15.4s, %12.8h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmul v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmul v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.4h, v17.4h}, [%2], #16 \n"// r10 r11
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%2] \n"// r12 r13 r14
"shll v14.4s, %15.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.4h, v17.4h}, [%3], #16 \n"// r20 r21
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%3] \n"// r22 r23 r24
"shll2 v15.4s, %17.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4h, v17.4h}, [%4], #16 \n"// r30 r31
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%4] \n"// r32 r33 r34
"shll v14.4s, %20.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4h, v17.4h}, [%5], #16 \n"// r40 r41
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%5] \n"// r42 r43 r44
"shll2 v15.4s, %22.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fadd v29.4s, v29.4s, v30.4s \n"
"fadd v31.4s, v31.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v31.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else // __aarch64__
asm volatile(
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n"// r00 r01
"pld [%2, #192] \n"
"vld1.u16 {d6-d8}, [%2 :64] \n"// r02 r03 r04
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"// k00
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"// sum0
"vshll.u16 q9, d21, #16 \n"// k01
"vmul.f32 q13, q8, q0 \n"
"vshll.u16 q10, d22, #16 \n"// k02
"vmul.f32 q14, q9, q1 \n"
"pld [%3, #128] \n"
"vld1.u16 {d14-d15}, [%3 :64]! \n"// r10 r11
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k03
"vmul.f32 q15, q10, q2 \n"
"vshll.u16 q10, d16, #16 \n"// k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q11, d17, #16 \n"// k10
"vmla.f32 q13, q10, q4 \n"
"pld [%3, #192] \n"
"vld1.u16 {d8-d10}, [%3 :64] \n"// r12 r13 r14
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q8, d18, #16 \n"// k11
"vmla.f32 q14, q11, q6 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n"// k12
"vmla.f32 q15, q8, q7 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n"// r20 r21
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q5, d10, #16 \n"
"vshll.u16 q8, d20, #16 \n"// k13
"vmla.f32 q12, q9, q3 \n"
"vshll.u16 q9, d21, #16 \n"// k14
"vmla.f32 q13, q8, q4 \n"
"vshll.u16 q10, d22, #16 \n"// k20
"vmla.f32 q14, q9, q5 \n"
"pld [%4, #192] \n"
"vld1.u16 {d6-d8}, [%4 :64] \n"// r22 r23 r24
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k21
"vmla.f32 q15, q10, q0 \n"
"vshll.u16 q10, d16, #16 \n"// k22
"vmla.f32 q12, q11, q1 \n"
"pld [%5, #128] \n"
"vld1.u16 {d14-d15}, [%5 :64]! \n"// r30 r31
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vshll.u16 q11, d17, #16 \n"// k23
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q8, d18, #16 \n"// k24
"vmla.f32 q14, q11, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n"// k30
"vmla.f32 q15, q8, q4 \n"
"pld [%5, #192] \n"
"vld1.u16 {d8-d10}, [%5 :64] \n"// r32 r33 r34
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q8, d20, #16 \n"// k31
"vmla.f32 q12, q9, q6 \n"
"vshll.u16 q9, d21, #16 \n"// k32
"vmla.f32 q13, q8, q7 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n"// r40 r41
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q5, d10, #16 \n"
"vshll.u16 q10, d22, #16 \n"// k33
"vmla.f32 q14, q9, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n"// k34
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q10, d16, #16 \n"// k40
"vmla.f32 q12, q11, q5 \n"
"pld [%6, #192] \n"
"vld1.u16 {d6-d8}, [%6 :64] \n"// r42 r43 r44
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q11, d17, #16 \n"// k41
"vmla.f32 q13, q10, q0 \n"
"vshll.u16 q8, d18, #16 \n"// k42
"vmla.f32 q14, q11, q1 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vshll.u16 q9, d19, #16 \n"// k43
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q8, d20, #16 \n"// k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q8, q4 \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %7, %7, #192 \n"// kptr -= 24 * 4;
"vadd.f32 q12, q12, q14 \n"
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
InsertionSort_OpenMP.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#define SIZE 2000
long array[SIZE];   /* shared data sorted in independent segments by the OpenMP threads */
double avg;         /* running sum (then mean) of elapsed times across Automated() runs */

/*
 * Sort array[start .. end-1] in place with insertion sort.
 *
 * The bounds matter: each OpenMP thread sorts its own disjoint segment,
 * so an element must never be moved below `start` into a neighbouring
 * thread's segment. The original condition `j > 0` allowed exactly that,
 * which is a data race when segments are sorted concurrently.
 */
void InsertionSort(int start, int end){
    for (int i = start + 1; i < end; i++){
        long temp = array[i];   /* long, matching the element type (was int: truncation risk) */
        int j = i;
        /* shift larger elements right, but stop at the segment boundary */
        while (j > start && array[j-1] > temp){
            array[j] = array[j-1];
            j--;
        }
        array[j] = temp;
    }
}
/*
 * One benchmark run: fill the global array with segment-wise ascending
 * random data (segment k holds values in (500k, 500k+500]), sort each
 * segment in parallel with insertion sort, and add the elapsed wall
 * time in milliseconds to the global `avg`.
 *
 * Because segment k's values are all larger than segment k-1's, sorting
 * the segments independently yields a fully sorted array.
 */
void Automated()
{
    long i;
    srand (time(NULL));
    for (i = 0 ; i < SIZE ; i++)
    {
        if (i < 500)
        {
            array[i] = 1 + (rand() % 500);
        }
        else if (i < 1000)
        {
            array[i] = 501 + (rand() % 500);
        }
        else if (i < 1500)
        {
            array[i] = 1001 + (rand() % 500);
        }
        else
        {
            array[i] = 1501 + (rand() % 500);
        }
    }
    /* Must be called BEFORE the parallel region: inside the region it
     * cannot change the number of threads of that region. */
    omp_set_num_threads(4);
    /* omp_get_wtime() measures wall-clock time; clock() sums CPU time
     * over all threads and grossly overstates parallel runtime. */
    double start = omp_get_wtime();
    #pragma omp parallel
    {
        int total_threads = omp_get_num_threads();
        int segment = SIZE/total_threads;
        #pragma omp for
        for(i = 0; i < total_threads; i++){
            int lo = i*segment;
            /* last thread also takes the remainder when SIZE is not
             * divisible by the thread count */
            int hi = (i == total_threads-1) ? SIZE : lo + segment;
            InsertionSort(lo, hi);
        }
    }
    double stop = omp_get_wtime();
    double elapsed = (stop - start) * 1000.0;   /* milliseconds */
    avg += elapsed;
}
/* Run the benchmark 100 times and report the mean elapsed time. */
int main(){
    avg = 0;
    for (int run = 0; run < 100; run++)
    {
        Automated();
    }
    avg /= 100;
    printf("\n\nOPenMP: Average Time Taken; InsertionSort: %lf \n\n", avg);
    return 0;
}
|
utilityGraphPartitioner.h | // ***********************************************************************
//
// Grappolo: A C++ library for graph clustering
// Mahantesh Halappanavar (hala@pnnl.gov)
// Pacific Northwest National Laboratory
//
// ***********************************************************************
//
// Copyright (2014) Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#ifndef _graph_partitioner_
#define _graph_partitioner_
/*
int METIS PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts, real_t *tpwgts,
real_t ubvec, idx_t *options, idx_t *objval, idx_t *part)
nvtxs: The number of vertices in the graph.
ncon: The number of balancing constraints. It should be at least 1.
xadj, adjncy: The adjacency structure of the graph as described in Section 5.5.
vwgt (NULL): The weights of the vertices as described in Section 5.5.
vsize (NULL): The size of the vertices for computing the total communication volume as described in Section 5.7.
adjwgt (NULL): The weights of the edges as described in Section 5.5.
nparts The number of parts to partition the graph.
tpwgts (NULL): This is an array of size npartsncon that specifies the desired weight for each partition and constraint.
The target partition weight for the ith partition and jth constraint is specified at tpwgts[i*ncon+j]
(the numbering for both partitions and constraints starts from 0). For each constraint, the sum of the
tpwgts[] entries must be 1.0 (i.e., \Sum_i tpwgts[i*ncon + j] = 1:0).
A NULL value can be passed to indicate that the graph should be equally divided among the partitions.
ubvec (NULL): This is an array of size ncon that specifies the allowed load imbalance tolerance for each constraint.
For the ith partition and jth constraint the allowed weight is the ubvec[j]*tpwgts[i*ncon+j] fraction
of the jth’s constraint total weight. The load imbalances must be greater than 1.0.
A NULL value can be passed indicating that the load imbalance tolerance for each constraint should
be 1.001 (for ncon=1) or 1.01 (for ncon<1).
options (NULL):
This is the array of options as described in Section 5.4.
The following options are valid for METIS PartGraphRecursive:
METIS_OPTION_CTYPE, METIS_OPTION_IPTYPE, METIS_OPTION_RTYPE,
METIS_OPTION_NO2HOP, METIS_OPTION_NCUTS, METIS_OPTION_NITER,
METIS_OPTION_SEED, METIS_OPTION_UFACTOR, METIS_OPTION_NUMBERING,
METIS_OPTION_DBGLVL
The following options are valid for METIS PartGraphKway:
METIS_OPTION_OBJTYPE, METIS_OPTION_CTYPE, METIS_OPTION_IPTYPE,
METIS_OPTION_RTYPE, METIS_OPTION_NO2HOP, METIS_OPTION_NCUTS,
METIS_OPTION_NITER, METIS_OPTION_UFACTOR, METIS_OPTION_MINCONN,
METIS_OPTION_CONTIG, METIS_OPTION_SEED, METIS_OPTION_NUMBERING,
METIS_OPTION_DBGLVL
objval: Upon successful completion, this variable stores the edge-cut or the total communication volume of
the partitioning solution. The value returned depends on the partitioning’s objective function.
part: This is a vector of size nvtxs that upon successful completion stores the partition vector of the graph.
The numbering of this vector starts from either 0 or 1, depending on the value of
options[METIS OPTION NUMBERING].
Returns
METIS OK Indicates that the function returned normally.
METIS ERROR INPUT Indicates an input error.
METIS ERROR MEMORY Indicates that it could not allocate the required memory.
METIS ERROR Indicates some other type of error.
*/
extern "C" {
#include "metis.h"
}
using namespace std;
/*
#ifdef __cplusplus
extern "C" {
#endif
//Multilevel k-way Partitioning
int METIS_PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts, real_t *tpwgts,
real_t ubvec, idx_t *options, idx_t *objval, idx_t *part);
#ifdef __cplusplus
}
#endif
*/
//METIS Graph Partitioner:
//METIS Graph Partitioner:
//Builds a CSR copy of G (idx_t arrays), calls METIS_PartGraphKway to split
//the graph into numParts parts minimizing edge cut, and writes the part id
//of each vertex into VertexPartitioning[0..NV-1].
void MetisGraphPartitioner( graph *G, comm_type *VertexPartitioning, int numParts ) {
    printf("Within MetisGraphPartitioner(): \n");
    printf("Number of partitions requested: %d\n", numParts); //%d: numParts is int (was %ld)

    //Get the iterators for the graph:
    comm_type NV = G->numVertices;
    comm_type NE = G->numEdges;
    comm_type *vtxPtr = G->edgeListPtrs;   //CSR offsets, NV+1 entries
    edge *vtxInd = G->edgeList;            //2*NE directed edges
    printf("|V|= %ld, |E|= %ld \n", (long)NV, (long)NE); //cast: comm_type width not guaranteed to be long

    idx_t nvtxs = (idx_t) NV;
    idx_t *xadj = (idx_t *) malloc ((NV+1) * sizeof(idx_t));
    assert(xadj != 0);
#pragma omp parallel for
    for(comm_type i=0; i<=NV; i++) {  //xadj has NV+1 entries, so <= is correct here
        xadj[i] = (idx_t) vtxPtr[i];
    }
    idx_t *adjncy = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjncy != 0);
#pragma omp parallel for
    for(comm_type i=0; i<2*NE; i++) {
        adjncy[i] = (idx_t) vtxInd[i].tail;
    }
    idx_t *adjwgt = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjwgt != 0);
#pragma omp parallel for
    for(comm_type i=0; i<2*NE; i++) {
        adjwgt[i] = (idx_t) vtxInd[i].weight;
    }

    idx_t nparts = (idx_t) numParts;
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);
    options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT; //Edgecut minimization
    options[METIS_OPTION_CTYPE]   = METIS_CTYPE_SHEM;  //Sorted heavy-edge matching
    options[METIS_OPTION_NUMBERING]= 0;                //C-style numbering, starting from 0
    //options[METIS_OPTION_NO2HOP]= 0; //Performs a 2-hop matching -- effective for power-law graphs
    options[METIS_OPTION_NSEPS]= 10;
    //options[METIS_OPTION_UFACTOR] = 30;

    idx_t ncon = 1;   //Number of balancing constraints (at least 1)
    idx_t objval = 0; //Will contain the edgecut (or total communication)
    idx_t *part = (idx_t *) malloc (NV * sizeof(idx_t)); //Partition information, NV entries
    assert(part != 0);

    //ubvec=NULL: default load-imbalance tolerance (1.001 for ncon==1)
    int returnVal = METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy, NULL, NULL, adjwgt,
                                        &nparts, NULL, NULL, options, &objval, part);
    if(returnVal == METIS_OK)
        printf("Edge cut: %ld\n", (long) objval); //cast: idx_t may be 32 or 64 bit
    else {
        if(returnVal == METIS_ERROR_MEMORY)
            printf("Metis could not allocate memory.\n");
        else
            printf("Metis error: %d\n", returnVal); //%d: returnVal is int (was %ld)
    }

#pragma omp parallel for
    for(comm_type i=0; i<NV; i++) {   //i<NV (was i<=NV: read past end of part[])
        VertexPartitioning[i] = (comm_type) part[i]; //Do explicit typecasts
    }

    //Cleanup:
    free(xadj); free(adjncy); free(adjwgt);
    free(part);
    printf("Returning back from Metis\n");
}
#endif
|
ofmo-twoint-direct.c | /**
* @file ofmo-twoint-direct.c
* 各タイプの二電子積分を計算して2電子ハミルトン行列(G行列)を
* 計算する
* この関数では、積分計算とG行列計算の両方を行っている
* */
/**
* @defgroup integ-twoint-direct direct法によるG行列作成を行う関数群
* buffered direct SCF法において、バッファに保存出来なかった2電子積分を
* 計算しながらG行列生成を行う関数群
*
* 各関数は、同じ引数を持っているので、以下にその内容を記す。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid 各ワーカプロセス(スレッド)のID
* (\f$ 0\le\tt{workerid}<\tt{nworkers} \f$)
* @param[in] ebuf_max_nzeri バッファに保存できる最大積分数
* @param[in] La 1つ目の軌道量子数
* @param[in] Lb 2つ目の軌道量子数
* (\f$ \tt{La} \ge \tt{Lb} \f$)
* @param[in] Lc 3つ目の軌道量子数
* (\f$ \tt{La} \ge \tt{Lc} \f$)
* @param[in] Ld 4つ目の軌道量子数
* (\f$ \tt{Lc} \ge \tt{Ld} \f$、かつ、
* \f$ \frac{\tt{La}(\tt{La}+1)}2+\tt{Lb} \ge
* \frac{\tt{Lc}(\tt{Lc}+1)}2+\tt{Ld} \f$)
* @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号
* @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号
* @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位)
* @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位)
* @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位)
* @param[in] leading_cs_pair[itype] CSペアタイプ番号 \c itype の
* 先頭CSペア番号
* @param[in] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分
* @param[in] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号
* @param[in] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる
* PSペアの先頭PSペア番号
* @param[in] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和
* \f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza[ipsp] PSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @param[in] last_ijcs 積分計算を始める際の、外側CSペアループ変数の初期値
* @param[in] last_klcs 積分計算を始める際の、外側CSペアループ変数の初期値
*
* @param[in] nao AO数
* @param[in] Ds[] 密度行列(正方行列形式)
* @param[out] G[] 計算されたG行列要素を格納する配列(正方行列形式)
*
* @ingroup integ-med
* */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "ofmo-def.h"
#define OFMO_EBUF_FULL -1
#define OFMO_EBUF_NOFULL 0
#define EPS_PS4 1.e-30
#define EPS_ERI 1.e-15
#define ZERO 0.e0
#define ONE 1.e0
#define HALF .5e0
#include "ofmo-twoint-core.h"
#include "ofmo-twoint.h"
extern int ofmo_integ_add_fock( const int nao, const size_t nstored_eri,
const double eri_val[], const short int eri_ind4[],
const double D[], double G[] );
extern float ofmo_twoint_dmax6(const int i, const int j, const int k, const int l);
/** Computes (ss,ss)-type two-electron integrals on the fly and
 * accumulates their contributions into the two-electron (G) matrix.
 * (Header translated from Japanese.)
 *
 * Part of the buffered direct SCF path: quartets that survive the
 * Schwarz / density screening are stored into etmp_val / etmp_ind4 and
 * flushed into G via ofmo_integ_add_fock() whenever the buffer fills.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ssss__(
// paralleization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis set & cutoff table data
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// concerned about buffered direct method
const long *petmp_max_nzeri, long *petmp_non_zero_eri,
double etmp_val[], short int etmp_ind4[],
const int *plast_ijcs, const int *plast_klcs,
// density matrix & G-matrix data
const int *pnao, const double Ds[], double G[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
long max_nzeri=*petmp_max_nzeri;
/* nzeri counts buffered integrals; nzeri4 = 4*nzeri indexes etmp_ind4 */
long nzeri4, nzeri=*petmp_non_zero_eri;
int Lab, Lcd, i;
int ijcs, ijcs1, ijao;
int klcs, klcs0, klao;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, jcs, jat, jao, kcs, kat, kao, lcs, lat, lao;
/* A..D: atomic centers of the four shells; BA, DC, AC: center differences */
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
/* screening thresholds, configured elsewhere via ofmo_twoint_eps_* */
float eps_eri = ofmo_twoint_eps_eri(0);
float eps_ps4 = ofmo_twoint_eps_ps4(0);
float eps_sch = ofmo_twoint_eps_sch(0);
double SSSS[1];   /* an (ss,ss) quartet yields exactly one integral */
Lab = La*(La+1)/2+Lb;
Lcd = Lc*(Lc+1)/2+Ld;
ijcs1 = leading_cs_pair[Lab+1];
klcs0 = leading_cs_pair[Lcd];
/* Resume from the CS-pair position where the buffered phase stopped
 * (last_ijcs == -1 means start fresh at this worker's first pair). */
if ( last_ijcs != -1 ) {
ijcs = last_ijcs;
klcs = last_klcs+1;
} else {
ijcs = leading_cs_pair[Lab] + workerid;
klcs = klcs0;
}
/* Reserve headroom for the integrals one quartet can add (cf. -=3 in
 * the psss variant). NOTE(review): -=0 here still keeps nzeri within
 * capacity since only 1 value is stored per quartet — confirm intent. */
max_nzeri-=0;
if ( nzeri>= max_nzeri ) {
/* buffer full on entry: fold the stored integrals into G and reset */
ofmo_integ_add_fock( nao, (size_t)nzeri, etmp_val, etmp_ind4, Ds, G );
nzeri = 0;
}
nzeri4 = nzeri<<2;
/* outer loop over bra CS pairs, strided across workers */
for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
val_ab = csp_schwarz[ijcs];
ics = csp_ics[ijcs];
jcs = csp_jcs[ijcs];
ijps0 = csp_leading_ps_pair[ijcs];
nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
iat = shel_atm[ics];
jat = shel_atm[jcs];
iao = shel_ini[ics];
jao = shel_ini[jcs];
ijao = iao*(iao+1)/2+jao;   /* canonical (triangular) pair index */
A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
/* inner loop over ket CS pairs; klcs<=ijcs avoids duplicate quartets */
for ( ; klcs<=ijcs; klcs++ ) {
val_cd = csp_schwarz[klcs];
/* Schwarz bound screening */
if ( val_ab*val_cd < eps_ps4 ) continue;
kcs = csp_ics[klcs];
lcs = csp_jcs[klcs];
/* density-weighted screening */
float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
if ( dmax*val_ab*val_cd < eps_sch ) continue;
klps0 = csp_leading_ps_pair[klcs];
nklps = csp_leading_ps_pair[klcs+1]-klps0;
kat = shel_atm[kcs];
lat = shel_atm[lcs];
kao = shel_ini[kcs];
lao = shel_ini[lcs];
klao = kao*(kao+1)/2+lao;
C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
/* evaluate the primitive (ss,ss) integral for this quartet */
twoint_core_ssss__(
&nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
&psp_xiza[ijps0], BA,
&nklps, &psp_zeta[klps0], &psp_dkps[klps0],
&psp_xiza[klps0], DC, AC, SSSS );
if ( fabs(SSSS[0]) > eps_eri ) {
double x;
/* HALF factors compensate double counting when index pairs coincide */
coe = ONE;
if ( iao == jao ) coe = HALF;
if ( kao == lao ) coe *= HALF;
if ( ijao == klao ) coe *= HALF;
x = coe * SSSS[0];
etmp_val[nzeri] = x;
etmp_ind4[nzeri4+0] = iao;
etmp_ind4[nzeri4+1] = jao;
etmp_ind4[nzeri4+2] = kao;
etmp_ind4[nzeri4+3] = lao;
nzeri++;
nzeri4+=4;
} // if ( fabs(SSSS[0]) > eps_eri )
/* flush buffer into G when full */
if ( nzeri >= max_nzeri ) {
ofmo_integ_add_fock( nao, (size_t)nzeri, etmp_val, etmp_ind4,
Ds, G );
nzeri = nzeri4= 0;
}
} // for ( klcs );
klcs = klcs0;
} // for ( ijcs );
/* report how many integrals remain buffered (caller flushes them) */
*petmp_non_zero_eri = nzeri;
return 0;
}
/** Computes (ps,ss)-type two-electron integrals on the fly and
 * accumulates their contributions into the two-electron (G) matrix.
 * (Header translated from Japanese.)
 *
 * Same buffered direct SCF structure as ofmo_twoint_direct_ssss__,
 * but each quartet produces up to 3 integrals (one per p component),
 * and the ket pair loop runs over the full klcs range of its type.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_psss__(
// paralleization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis set & cutoff table data
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// concerned about buffered direct method
const long *petmp_max_nzeri, long *petmp_non_zero_eri,
double etmp_val[], short int etmp_ind4[],
const int *plast_ijcs, const int *plast_klcs,
// density matrix & G-matrix data
const int *pnao, const double Ds[], double G[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
long max_nzeri=*petmp_max_nzeri;
/* nzeri counts buffered integrals; nzeri4 = 4*nzeri indexes etmp_ind4 */
long nzeri4, nzeri=*petmp_non_zero_eri;
int Lab, Lcd, i;
int ijcs, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, lcs, lat, lao;
/* A..D: atomic centers of the four shells; BA, DC, AC: center differences */
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
/* screening thresholds, configured elsewhere via ofmo_twoint_eps_* */
float eps_eri = ofmo_twoint_eps_eri(0);
float eps_ps4 = ofmo_twoint_eps_ps4(0);
float eps_sch = ofmo_twoint_eps_sch(0);
double PSSS[3];   /* one value per p-shell component */
Lab = La*(La+1)/2+Lb;
Lcd = Lc*(Lc+1)/2+Ld;
ijcs1 = leading_cs_pair[Lab+1];
klcs0 = leading_cs_pair[Lcd];
klcs1 = leading_cs_pair[Lcd+1];
/* Resume from the CS-pair position where the buffered phase stopped
 * (last_ijcs == -1 means start fresh at this worker's first pair). */
if ( last_ijcs != -1 ) {
ijcs = last_ijcs;
klcs = last_klcs+1;
} else {
ijcs = leading_cs_pair[Lab] + workerid;
klcs = klcs0;
}
/* reserve headroom for the up-to-3 integrals one quartet can add */
max_nzeri-=3;
if ( nzeri>= max_nzeri ) {
/* buffer full on entry: fold the stored integrals into G and reset */
ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
nzeri = 0;
}
nzeri4 = nzeri<<2;
/* outer loop over bra CS pairs, strided across workers */
for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
val_ab = csp_schwarz[ijcs];
ics = csp_ics[ijcs];
jcs = csp_jcs[ijcs];
ijps0 = csp_leading_ps_pair[ijcs];
nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
iat = shel_atm[ics];
jat = shel_atm[jcs];
iao0 = shel_ini[ics];   /* first AO of the p shell (3 consecutive AOs) */
jao = shel_ini[jcs];
A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
/* inner loop over all ket CS pairs of this (ss) type */
for ( ; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz[klcs];
/* Schwarz bound screening */
if ( val_ab*val_cd < eps_ps4 ) continue;
kcs = csp_ics[klcs];
lcs = csp_jcs[klcs];
/* density-weighted screening */
float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
if ( dmax*val_ab*val_cd < eps_sch ) continue;
klps0 = csp_leading_ps_pair[klcs];
nklps = csp_leading_ps_pair[klcs+1]-klps0;
kat = shel_atm[kcs];
lat = shel_atm[lcs];
kao = shel_ini[kcs];
lao = shel_ini[lcs];
C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
/* evaluate the 3 primitive (ps,ss) integrals for this quartet */
twoint_core_psss__(
&nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
&psp_xiza[ijps0], BA,
&nklps, &psp_zeta[klps0], &psp_dkps[klps0],
&psp_xiza[klps0], DC, AC, PSSS );
/* HALF compensates double counting when the ket AOs coincide */
coe = ( kao == lao ? HALF : ONE );
for ( i=0, iao=iao0; i<3; i++, iao++ ) {
if ( fabs(PSSS[i]) > eps_eri ) {
double x;
x = coe * PSSS[i];
etmp_val[nzeri] = x;
etmp_ind4[nzeri4+0] = iao;
etmp_ind4[nzeri4+1] = jao;
etmp_ind4[nzeri4+2] = kao;
etmp_ind4[nzeri4+3] = lao;
nzeri++;
nzeri4+=4;
}
} // for ( i, iao);
/* flush buffer into G when full */
if ( nzeri >= max_nzeri ) {
ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
Ds, G );
nzeri = nzeri4= 0;
}
} // for ( klcs );
klcs = klcs0;
} // for ( ijcs );
/* report how many integrals remain buffered (caller flushes them) */
*petmp_non_zero_eri = nzeri;
return 0;
}
/** Computes (ps,ps)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * The ij contracted-shell pairs of angular class Lab are distributed
 * round-robin over `nworkers` workers (stride `nworkers`, offset
 * `workerid`).  Because bra and ket carry the same (p,s) class, the kl
 * loop runs triangularly (klcs <= ijcs).  Surviving integrals are staged
 * in the (etmp_val, etmp_ind4) buffer and flushed into G through
 * ofmo_integ_add_fock() whenever the buffer could overflow on the next
 * shell quartet.  *plast_ijcs == -1 means "start fresh"; otherwise the
 * sweep resumes right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_psps__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, k, ix;
    int ipat, IJ, KL;
    int ijcs, ijcs1;
    int klcs, klcs0;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double PSPS[3*3];                         // one (ps,ps) quartet: 3x3 integrals
    // angular class indices: Lab = La*(La+1)/2+Lb, same triangular map for Lcd
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=3*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao    = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        // triangular ket loop: bra and ket belong to the same pair list
        for ( ; klcs<=ijcs; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao0  = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_psps__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, PSPS );
            // when ics==kcs the triangular loop alone would drop the jcs>lcs
            // partner; ipat readmits those quartets below
            ipat = ( (ics==kcs && jcs>lcs) ? true : false);
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                IJ = ((iao*iao+iao)>>1) + jao;          // triangular index of (iao,jao)
                for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
                    KL = ((kao*kao+kao)>>1) + lao ;
                    if ( fabs(PSPS[ix]) <= eps_eri ) continue;
                    if ( IJ>=KL || ipat ) {
                        double x;
                        coe = ONE;
                        if ( IJ == KL ) coe = HALF;     // diagonal pair counted once
                        x = coe * PSPS[ix];
                        etmp_val[nzeri]     = x;
                        etmp_ind4[nzeri4+0] = iao;
                        etmp_ind4[nzeri4+1] = jao;
                        etmp_ind4[nzeri4+2] = kao;
                        etmp_ind4[nzeri4+3] = lao;
                        nzeri++;
                        nzeri4+=4;
                    }
                }       // for ( kao )
            }           // for ( iao )
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (pp,ss)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers; the kl loop covers the whole Lcd pair range
 * (bra and ket classes differ, so no triangular restriction applies).
 * Surviving integrals are staged in (etmp_val, etmp_ind4) and flushed
 * into G via ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ppss__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, j, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe, coe0;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double PPSS[3*3];                         // one (pp,ss) quartet: 3x3 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=3*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao0   = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao   = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_ppss__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, PPSS );
            coe0 = ( kao == lao ? HALF : ONE );   // ket shells identical → factor 1/2
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
                    if ( jao > iao ) continue;    // keep only the lower bra triangle
                    if ( fabs(PPSS[ix]) > eps_eri ) {
                        double x;
                        coe = coe0;
                        if ( iao == jao ) coe *= HALF;  // bra diagonal → extra 1/2
                        x = coe * PPSS[ix];
                        etmp_val[nzeri]     = x;
                        etmp_ind4[nzeri4+0] = iao;
                        etmp_ind4[nzeri4+1] = jao;
                        etmp_ind4[nzeri4+2] = kao;
                        etmp_ind4[nzeri4+3] = lao;
                        nzeri++;
                        nzeri4+=4;
                    }
                }       // for ( jao );
            }           // for ( iao );
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (pp,ps)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers; the kl loop covers the whole Lcd pair range.
 * Surviving integrals are staged in (etmp_val, etmp_ind4) and flushed
 * into G via ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ppps__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, j, k, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double PPPS[3*3*3];                       // one (pp,ps) quartet: 3x3x3 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=3*3*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao0   = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao0  = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_ppps__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, PPPS );
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                for ( j=0, jao=jao0; j<3; j++, jao++ ) {
                    // skip the upper bra triangle; advance ix past its 3 kao entries
                    if ( jao>iao ) { ix += 3; continue; }
                    coe = ( iao==jao ? HALF : ONE );   // bra diagonal → factor 1/2
                    for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
                        if ( fabs(PPPS[ix]) > eps_eri ) {
                            double x;
                            x = coe * PPPS[ix];
                            etmp_val[nzeri]     = x;
                            etmp_ind4[nzeri4+0] = iao;
                            etmp_ind4[nzeri4+1] = jao;
                            etmp_ind4[nzeri4+2] = kao;
                            etmp_ind4[nzeri4+3] = lao;
                            nzeri++;
                            nzeri4+=4;
                        }   // if ( fabs );
                    }       // for ( kao )
                }           // for ( jao )
            }               // for ( iao )
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (pp,pp)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers.  Because bra and ket carry the same (p,p) class,
 * the kl loop runs triangularly (klcs <= ijcs).  Surviving integrals
 * are staged in (etmp_val, etmp_ind4) and flushed into G via
 * ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_pppp__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, j, k, l, ipat;
    int I2, IJ, K2, KL;
    int ijcs, ijcs1;
    int klcs, klcs0;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe, coe0;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double PPPP[3*3*3*3], pppp;               // one (pp,pp) quartet: 3^4 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=3*3*3*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao0   = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        // triangular ket loop: bra and ket belong to the same pair list
        for ( ; klcs<=ijcs; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao0  = shel_ini[kcs];
            lao0  = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_pppp__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, PPPP );
            // when ics==kcs the triangular loop alone would drop the jcs>lcs
            // partner; ipat readmits those quartets below
            ipat = ( (ics==kcs && jcs>lcs) ? true : false );
            for ( i=0, iao=iao0; i<3; i++, iao++ ) {
                I2 = iao*(iao+1)/2;                     // triangular row offset for iao
                for ( j=0, jao=jao0; j<3; j++, jao++ ) {
                    if ( jao>iao ) continue;            // lower bra triangle only
                    IJ = I2 + jao;
                    coe0 = ( iao==jao ? HALF : ONE );   // bra diagonal → factor 1/2
                    for ( k=0, kao=kao0; k<3; k++, kao++ ) {
                        K2 = kao*(kao+1)/2;
                        for ( l=0, lao=lao0; l<3; l++, lao++ ) {
                            if ( lao>kao ) continue;    // lower ket triangle only
                            pppp = PPPP[i*27+j*9+k*3+l];
                            if ( fabs(pppp) > eps_eri ) {
                                double x;
                                KL = K2 + lao;
                                if ( IJ >= KL || ipat ) {
                                    coe = coe0;
                                    if ( kao==lao ) coe *= HALF;  // ket diagonal
                                    if ( KL == IJ ) coe *= HALF;  // bra==ket pair
                                    x = coe * pppp;
                                    etmp_val[nzeri]     = x;
                                    etmp_ind4[nzeri4+0] = iao;
                                    etmp_ind4[nzeri4+1] = jao;
                                    etmp_ind4[nzeri4+2] = kao;
                                    etmp_ind4[nzeri4+3] = lao;
                                    nzeri++;
                                    nzeri4+=4;
                                }
                            }   // if ( fabs )
                        }       // for ( lao )
                    }           // for ( kao )
                }               // for ( jao )
            }                   // for ( iao )
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (ds,ss)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers; the kl loop covers the whole Lcd pair range.
 * Surviving integrals are staged in (etmp_val, etmp_ind4) and flushed
 * into G via ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dsss__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double DSSS[6];                           // one (ds,ss) quartet: 6 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=6;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao    = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao   = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_dsss__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DSSS );
            coe = ( kao == lao ? HALF : ONE );   // ket shells identical → factor 1/2
            for ( i=0, iao=iao0; i<6; i++, iao++ ) {
                if ( fabs(DSSS[i]) > eps_eri ) {
                    double x;
                    x = coe * DSSS[i];
                    etmp_val[nzeri]     = x;
                    etmp_ind4[nzeri4+0] = iao;
                    etmp_ind4[nzeri4+1] = jao;
                    etmp_ind4[nzeri4+2] = kao;
                    etmp_ind4[nzeri4+3] = lao;
                    nzeri++;
                    nzeri4+=4;
                }
            }   // for ( i, iao);
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    // NOTE(review): disabled check-sum debug code, kept for reference
    /*
    // for check sum
    #pragma omp atomic
    check_sum[6] += lsum;
    */
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (ds,ps)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers; the kl loop covers the whole Lcd pair range.
 * No degeneracy coefficient is applied here: bra (d,s) and ket (p,s)
 * shells always differ, so no diagonal factor arises.  Surviving
 * integrals are staged in (etmp_val, etmp_ind4) and flushed into G via
 * ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dsps__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, k, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double DSPS[6*3];                         // one (ds,ps) quartet: 6x3 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=6*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao    = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao0  = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_dsps__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DSPS );
            for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
                for (k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
                    if ( fabs(DSPS[ix]) > eps_eri ) {
                        double x;
                        x = DSPS[ix];
                        etmp_val[nzeri]     = x;
                        etmp_ind4[nzeri4+0] = iao;
                        etmp_ind4[nzeri4+1] = jao;
                        etmp_ind4[nzeri4+2] = kao;
                        etmp_ind4[nzeri4+3] = lao;
                        nzeri++;
                        nzeri4+=4;
                    }
                }
            }   // for ( i, iao);
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (ds,pp)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers; the kl loop covers the whole Lcd pair range.
 * Only the lower ket triangle (lao <= kao) is stored, with a 1/2
 * factor on the ket diagonal.  Surviving integrals are staged in
 * (etmp_val, etmp_ind4) and flushed into G via ofmo_integ_add_fock()
 * before the buffer can overflow.  *plast_ijcs == -1 means "start
 * fresh"; otherwise the sweep resumes right after the saved
 * (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dspp__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, k, l, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double DSPP[6*3*3];                       // one (ds,pp) quartet: 6x3x3 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=6*3*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao    = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao0  = shel_ini[kcs];
            lao0  = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_dspp__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DSPP );
            for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
                for (k=0, kao=kao0; k<3; k++, kao++ ) {
                    for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
                        if ( lao > kao ) continue;   // lower ket triangle only
                        coe = ( kao == lao ? HALF : ONE );  // ket diagonal → factor 1/2
                        if ( fabs(DSPP[ix]) > eps_eri ) {
                            double x;
                            x = coe * DSPP[ix];
                            etmp_val[nzeri]     = x;
                            etmp_ind4[nzeri4+0] = iao;
                            etmp_ind4[nzeri4+1] = jao;
                            etmp_ind4[nzeri4+2] = kao;
                            etmp_ind4[nzeri4+3] = lao;
                            nzeri++;
                            nzeri4+=4;
                        }
                    }
                }
            }   // for ( i, iao);
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (ds,ds)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers.  Because bra and ket carry the same (d,s) class,
 * the kl loop runs triangularly (klcs <= ijcs).  Surviving integrals
 * are staged in (etmp_val, etmp_ind4) and flushed into G via
 * ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dsds__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, k, ix;
    int IJ, KL, ipat;
    int ijcs, ijcs1;
    int klcs, klcs0;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double DSDS[6*6];                         // one (ds,ds) quartet: 6x6 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=6*6;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao    = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        // triangular ket loop: bra and ket belong to the same pair list
        for ( ; klcs<=ijcs; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao0  = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_dsds__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DSDS );
            // when ics==kcs the triangular loop alone would drop the jcs>lcs
            // partner; ipat readmits those quartets below
            ipat = ( (ics==kcs && jcs>lcs) ? true : false);
            for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
                IJ = ( (iao*iao+iao)>>1) + jao;         // triangular index of (iao,jao)
                for ( k=0, kao=kao0; k<6; k++, kao++, ix++ ) {
                    KL = ( (kao*kao+kao)>>1) + lao;
                    if ( fabs(DSDS[ix]) <= eps_eri ) continue;
                    if ( IJ >= KL || ipat ) {
                        double x;
                        coe = ONE;
                        if ( iao==kao && jao==lao ) coe = HALF;  // bra==ket → factor 1/2
                        x = coe * DSDS[ix];
                        etmp_val[nzeri]     = x;
                        etmp_ind4[nzeri4+0] = iao;
                        etmp_ind4[nzeri4+1] = jao;
                        etmp_ind4[nzeri4+2] = kao;
                        etmp_ind4[nzeri4+3] = lao;
                        nzeri++;
                        nzeri4+=4;
                    }
                }       // for ( kao )
            }           // for ( iao )
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (dp,ss)-type two-electron integrals and accumulates their
 * contribution into the G (Fock) matrix (buffered direct-SCF path).
 *
 * ij shell pairs of class Lab are distributed round-robin over
 * `nworkers` workers; the kl loop covers the whole Lcd pair range.
 * The bra (d,p) shells always differ, so no bra diagonal factor
 * arises; a 1/2 factor is applied when the ket s-shells coincide.
 * Surviving integrals are staged in (etmp_val, etmp_ind4) and flushed
 * into G via ofmo_integ_add_fock() before the buffer can overflow.
 * *plast_ijcs == -1 means "start fresh"; otherwise the sweep resumes
 * right after the saved (last_ijcs, last_klcs) position.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dpss__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // buffered direct method work area
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;  // nzeri4 tracks 4*nzeri (index into etmp_ind4)
    int Lab, Lcd, i, j, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);   // per-integral magnitude cutoff
    float eps_ps4 = ofmo_twoint_eps_ps4(0);   // Schwarz product cutoff
    float eps_sch = ofmo_twoint_eps_sch(0);   // density-weighted Schwarz cutoff
    double DPSS[6*3];                         // one (dp,ss) quartet: 6x3 integrals
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
        // resume immediately after the previously processed (ijcs, klcs)
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        // fresh start: this worker's first ij shell pair
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full quartet so the buffer never overflows mid-quartet
    max_nzeri-=6*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics    = csp_ics[ijcs];
        jcs    = csp_jcs[ijcs];
        ijps0  = csp_leading_ps_pair[ijcs];
        nijps  = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat    = shel_atm[ics];
        jat    = shel_atm[jcs];
        iao0   = shel_ini[ics];
        jao0   = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            if ( val_ab*val_cd < eps_ps4 ) continue;   // Schwarz screening
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;  // density-weighted screening
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat   = shel_atm[kcs];
            lat   = shel_atm[lcs];
            kao   = shel_ini[kcs];
            lao   = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_dpss__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DPSS );
            coe = ( kao == lao ? HALF : ONE );   // ket shells identical → factor 1/2
            for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
                for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
                    if ( fabs(DPSS[ix]) > eps_eri ) {
                        double x;
                        x = coe * DPSS[ix];
                        etmp_val[nzeri]     = x;
                        etmp_ind4[nzeri4+0] = iao;
                        etmp_ind4[nzeri4+1] = jao;
                        etmp_ind4[nzeri4+2] = kao;
                        etmp_ind4[nzeri4+3] = lao;
                        nzeri++;
                        nzeri4+=4;
                    }
                }
            }   // for ( i, iao);
            // flush the buffer before it can overflow on the next quartet
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        }       // for ( klcs );
        klcs = klcs0;   // subsequent ij pairs scan the ket list from its start
    }           // for ( ijcs );
    *petmp_non_zero_eri = nzeri;   // hand the residual buffer count back to the caller
    return 0;
}
/** Computes (dp,ps)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Scalars are passed through pointers (Fortran-callable interface).
 * If *plast_ijcs != -1 the loops resume just after the saved
 * (last_ijcs, last_klcs) contracted-shell-pair position; otherwise
 * worker `workerid` starts at its own first ij pair and strides by
 * nworkers.  Integrals with |value| > eps_eri are appended to the
 * (etmp_val, etmp_ind4) buffer, which is drained into G through
 * ofmo_integ_add_fock() whenever it nears capacity.  On return,
 * *petmp_non_zero_eri holds the number of ERIs still buffered.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dpps__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DPPS[6*3*3];		/* one (dp,ps) shell-quartet batch: 6*3*3 values */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*3*3;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	/* leftover buffer from a previous call is already near full: drain it */
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	for ( ; klcs<klcs1; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    /* density-weighted screening; NOTE(review): dmax presumably the
	     * max density-matrix element over these four shells — confirm */
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_dpps__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DPPS );
	    /* buffer every surviving integral with its 4 AO indices */
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<3; j++, jao++ ) {
		    for (k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
			if ( fabs(DPPS[ix]) > eps_eri ) {
			    double x;
			    x = DPPS[ix];
			    etmp_val[nzeri] = x;
			    etmp_ind4[nzeri4+0] = iao;
			    etmp_ind4[nzeri4+1] = jao;
			    etmp_ind4[nzeri4+2] = kao;
			    etmp_ind4[nzeri4+3] = lao;
			    nzeri++;
			    nzeri4+=4;
			}
		    }
		}
	    }	// for ( i, iao);
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dp,pp)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Same contract as the other ofmo_twoint_direct_* routines: scalars
 * are passed through pointers; if *plast_ijcs != -1 the loops resume
 * just after (last_ijcs, last_klcs), otherwise worker `workerid`
 * strides over ij pairs by nworkers.  Surviving integrals are stored
 * in (etmp_val, etmp_ind4) and drained into G via
 * ofmo_integ_add_fock() near capacity; *petmp_non_zero_eri returns
 * the count still buffered.  Because the kl side is a (p,p) pair,
 * only the lao <= kao triangle is kept, with a HALF weight on the
 * kao == lao diagonal.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dppp__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, l, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DPPP[6*3*3*3];	/* one (dp,pp) shell-quartet batch */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*3*3*3;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	for ( ; klcs<klcs1; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao0 = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_dppp__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DPPP );
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<3; j++, jao++ ) {
		    for (k=0, kao=kao0; k<3; k++, kao++ ) {
			for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
			    /* keep the lao <= kao triangle only;
			     * ix still advances via the for-clause */
			    if ( lao > kao ) continue;
			    coe = ( kao == lao ? HALF : ONE );
			    if ( fabs(DPPP[ix]) > eps_eri ) {
				double x;
				x = coe * DPPP[ix];
				etmp_val[nzeri] = x;
				etmp_ind4[nzeri4+0] = iao;
				etmp_ind4[nzeri4+1] = jao;
				etmp_ind4[nzeri4+2] = kao;
				etmp_ind4[nzeri4+3] = lao;
				nzeri++;
				nzeri4+=4;
			    }
			}
		    }
		}
	    }	// for ( i, iao);
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dp,ds)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Same contract as the other ofmo_twoint_direct_* routines: scalars
 * are passed through pointers; if *plast_ijcs != -1 the loops resume
 * just after (last_ijcs, last_klcs), otherwise worker `workerid`
 * strides over ij pairs by nworkers.  Surviving integrals are stored
 * in (etmp_val, etmp_ind4) and drained into G via
 * ofmo_integ_add_fock() near capacity; *petmp_non_zero_eri returns
 * the count still buffered.  kcs/lcs carry different shell types
 * (d vs s), so no diagonal weighting is needed here.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dpds__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DPDS[6*3*6];		/* one (dp,ds) shell-quartet batch */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*3*6;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	for ( ; klcs<klcs1; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_dpds__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DPDS );
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<3; j++, jao++ ) {
		    for (k=0, kao=kao0; k<6; k++, kao++, ix++ ) {
			if ( fabs(DPDS[ix]) > eps_eri ) {
			    double x;
			    x = DPDS[ix];
			    etmp_val[nzeri] = x;
			    etmp_ind4[nzeri4+0] = iao;
			    etmp_ind4[nzeri4+1] = jao;
			    etmp_ind4[nzeri4+2] = kao;
			    etmp_ind4[nzeri4+3] = lao;
			    nzeri++;
			    nzeri4+=4;
			}
		    }
		}
	    }	// for ( i, iao);
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dp,dp)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Unlike the mixed-type routines, both bra and ket are (dp) pairs,
 * so the kl loop runs only over the triangle klcs <= ijcs and each
 * quartet is kept once in canonical order: a value is stored when
 * the composite indices satisfy IJ >= KL (weighted HALF on IJ == KL),
 * or unconditionally when ipat flags the ics==kcs, jcs>lcs case.
 * All other conventions (pointer-passed scalars, resumption from
 * (last_ijcs, last_klcs), the (etmp_val, etmp_ind4) buffer drained
 * via ofmo_integ_add_fock(), count returned in *petmp_non_zero_eri)
 * match the sibling ofmo_twoint_direct_* routines.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dpdp__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, l, ix, ipat;
    int IJ, KL;
    int ijcs, ijcs1;
    int klcs, klcs0;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DPDP[6*3*6*3];	/* one (dp,dp) shell-quartet batch */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*3*6*3;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	/* triangular kl loop: both sides are (dp) pairs */
	for ( ; klcs<=ijcs; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao0 = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_dpdp__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DPDP );
	    /* same i shell but jcs > lcs: keep the quartet regardless of IJ/KL order */
	    ipat = ( (ics==kcs && jcs>lcs) ? true : false );
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<3; j++, jao++ ) {
		    IJ = ((iao*iao+iao)>>1) + jao;	/* triangular composite index */
		    for ( k=0, kao=kao0; k<6; k++, kao++ ) {
			for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
			    KL = ((kao*kao+kao)>>1) + lao;
			    if ( fabs(DPDP[ix]) > eps_eri ) {
				if ( IJ>=KL || ipat ) {
				    double x;
				    coe = ( IJ==KL ? HALF : ONE );
				    x = coe * DPDP[ix];
				    etmp_val[nzeri] = x;
				    etmp_ind4[nzeri4+0] = iao;
				    etmp_ind4[nzeri4+1] = jao;
				    etmp_ind4[nzeri4+2] = kao;
				    etmp_ind4[nzeri4+3] = lao;
				    nzeri++;
				    nzeri4+=4;
				}
			    }
			}
		    }	// for ( kao )
		}
	    }	// for ( iao )
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dd,ss)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Scalars arrive through pointers (Fortran-callable interface).
 * When *plast_ijcs != -1 the loops resume just past the saved
 * (last_ijcs, last_klcs) shell-pair position; otherwise worker
 * `workerid` starts at its own first ij pair and strides by
 * nworkers.  Since the bra is a (d,d) pair, only the jao <= iao
 * triangle of each quartet is kept, weighted by HALF on each
 * coincident index pair.  Surviving values go into the
 * (etmp_val, etmp_ind4) buffer, drained into G through
 * ofmo_integ_add_fock() when nearly full; *petmp_non_zero_eri
 * reports how many ERIs remain buffered on return.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ddss__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    const int nworkers = *pnworkers, workerid = *pworkerid;
    const int La = *pLa, Lb = *pLb, Lc = *pLc, Ld = *pLd;
    const int last_ijcs = *plast_ijcs, last_klcs = *plast_klcs;
    const int nao = *pnao;
    long nzeri  = *petmp_non_zero_eri;
    long limit  = *petmp_max_nzeri;
    long nzeri4;
    int ii, jj, pos;
    int ijcs, ijcs_end, klcs, klcs_start, klcs_end;
    int ijps0, nijps, klps0, nklps;
    int ics, jcs, kcs, lcs;
    int iat, jat, kat, lat;
    int iao, jao, kao, lao, iao0, jao0;
    double OA[3], OB[3], OC[3], OD[3], vBA[3], vDC[3], vAC[3];
    double bound_ab, bound_cd;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double eribuf[6*6];		/* one (dd,ss) shell-quartet batch: 6*6 values */
    const int Lab = La*(La+1)/2 + Lb;
    const int Lcd = Lc*(Lc+1)/2 + Ld;
    ijcs_end   = leading_cs_pair[Lab+1];
    klcs_start = leading_cs_pair[Lcd];
    klcs_end   = leading_cs_pair[Lcd+1];
    if ( last_ijcs == -1 ) {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs_start;
    } else {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs + 1;
    }
    limit -= 6*6;	/* keep head-room for one full shell quartet */
    if ( nzeri >= limit ) {
	/* leftover buffer from a previous call is already near full: drain it */
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri << 2;	/* 4 AO indices per buffered ERI */
    for ( ; ijcs < ijcs_end; ijcs += nworkers ) {
	bound_ab = csp_schwarz[ijcs];
	ics   = csp_ics[ijcs];
	jcs   = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1] - ijps0;
	iat  = shel_atm[ics];
	jat  = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	OA[0] = atom_x[iat]; OB[0] = atom_x[jat];
	OA[1] = atom_y[iat]; OB[1] = atom_y[jat];
	OA[2] = atom_z[iat]; OB[2] = atom_z[jat];
	vBA[0] = OB[0] - OA[0];
	vBA[1] = OB[1] - OA[1];
	vBA[2] = OB[2] - OA[2];
	for ( ; klcs < klcs_end; klcs++ ) {
	    bound_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( bound_ab*bound_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*bound_ab*bound_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1] - klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao = shel_ini[kcs];
	    lao = shel_ini[lcs];
	    OC[0] = atom_x[kat]; OD[0] = atom_x[lat];
	    OC[1] = atom_y[kat]; OD[1] = atom_y[lat];
	    OC[2] = atom_z[kat]; OD[2] = atom_z[lat];
	    vAC[0] = OA[0] - OC[0]; vDC[0] = OD[0] - OC[0];
	    vAC[1] = OA[1] - OC[1]; vDC[1] = OD[1] - OC[1];
	    vAC[2] = OA[2] - OC[2]; vDC[2] = OD[2] - OC[2];
	    twoint_core_ddss__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], vBA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], vDC, vAC, eribuf );
	    /* base weight for the ket: HALF when the two s AOs coincide */
	    double wkl = ( kao == lao ? HALF : ONE );
	    for ( ii = 0, iao = iao0, pos = 0; ii < 6; ii++, iao++ ) {
		for ( jj = 0, jao = jao0; jj < 6; jj++, jao++, pos++ ) {
		    /* keep the jao <= iao triangle whose value survives the cutoff */
		    if ( jao <= iao && fabs(eribuf[pos]) > eps_eri ) {
			double w = ( iao == jao ? wkl * HALF : wkl );
			etmp_val[nzeri]  = w * eribuf[pos];
			etmp_ind4[nzeri4++] = iao;
			etmp_ind4[nzeri4++] = jao;
			etmp_ind4[nzeri4++] = kao;
			etmp_ind4[nzeri4++] = lao;
			nzeri++;
		    }
		}
	    }
	    if ( nzeri >= limit ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = 0;
		nzeri4 = 0;
	    }
	}	// klcs loop
	klcs = klcs_start;	/* after the resumed ij pair, scan the full kl range again */
    }	// ijcs loop
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dd,ps)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Same contract as the other ofmo_twoint_direct_* routines: scalars
 * are passed through pointers; if *plast_ijcs != -1 the loops resume
 * just after (last_ijcs, last_klcs), otherwise worker `workerid`
 * strides over ij pairs by nworkers.  Because the bra is a (d,d)
 * pair, only the jao <= iao triangle is kept, weighted HALF on the
 * iao == jao diagonal.  Surviving values go into the
 * (etmp_val, etmp_ind4) buffer, drained into G via
 * ofmo_integ_add_fock(); *petmp_non_zero_eri returns the count
 * still buffered.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ddps__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DDPS[6*6*3];		/* one (dd,ps) shell-quartet batch */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*6*3;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	for ( ; klcs<klcs1; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_ddps__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DDPS );
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<6; j++, jao++ ) {
		    /* skip the upper triangle; advance ix past its 3 k values */
		    if ( jao>iao ) { ix += 3; continue; }
		    coe = ( iao==jao ? HALF : ONE );
		    for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
			if ( fabs(DDPS[ix]) > eps_eri ) {
			    double x;
			    x = coe * DDPS[ix];
			    etmp_val[nzeri] = x;
			    etmp_ind4[nzeri4+0] = iao;
			    etmp_ind4[nzeri4+1] = jao;
			    etmp_ind4[nzeri4+2] = kao;
			    etmp_ind4[nzeri4+3] = lao;
			    nzeri++;
			    nzeri4+=4;
			}	// if ( fabs );
		    }	// for ( kao )
		}	// for ( jao )
	    }	// for ( iao )
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dd,pp)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Same contract as the other ofmo_twoint_direct_* routines: scalars
 * are passed through pointers; if *plast_ijcs != -1 the loops resume
 * just after (last_ijcs, last_klcs), otherwise worker `workerid`
 * strides over ij pairs by nworkers.  Both bra (d,d) and ket (p,p)
 * are same-type pairs, so only the jao <= iao and lao <= kao
 * triangles are kept, with HALF weights applied on each coincident
 * diagonal.  Surviving values go into (etmp_val, etmp_ind4), drained
 * into G via ofmo_integ_add_fock(); *petmp_non_zero_eri returns the
 * count still buffered.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ddpp__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, l, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe, coe0;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DDPP[6*6*3*3];	/* one (dd,pp) shell-quartet batch */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*6*3*3;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	for ( ; klcs<klcs1; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao0 = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_ddpp__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DDPP );
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<6; j++, jao++ ) {
		    /* skip the upper bra triangle; advance ix past its 3*3 kl values */
		    if ( jao>iao ) { ix+=3*3; continue; }
		    coe0 = (iao==jao ? HALF : ONE );
		    for (k=0, kao=kao0; k<3; k++, kao++ ) {
			for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
			    /* keep the lao <= kao triangle only;
			     * ix still advances via the for-clause */
			    if ( lao > kao ) continue;
			    coe = coe0 * ( kao==lao ? HALF : ONE );
			    if ( fabs(DDPP[ix]) > eps_eri ) {
				double x;
				x = coe * DDPP[ix];
				etmp_val[nzeri] = x;
				etmp_ind4[nzeri4+0] = iao;
				etmp_ind4[nzeri4+1] = jao;
				etmp_ind4[nzeri4+2] = kao;
				etmp_ind4[nzeri4+3] = lao;
				nzeri++;
				nzeri4+=4;
			    }
			}
		    }
		}
	    }	// for ( i, iao);
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dd,ds)-type two-electron integrals and folds them into
 * the G matrix (buffered direct-SCF scheme).
 *
 * Same contract as the other ofmo_twoint_direct_* routines: scalars
 * are passed through pointers; if *plast_ijcs != -1 the loops resume
 * just after (last_ijcs, last_klcs), otherwise worker `workerid`
 * strides over ij pairs by nworkers.  Because the bra is a (d,d)
 * pair, only the jao <= iao triangle is kept, weighted HALF on the
 * iao == jao diagonal.  Surviving values go into the
 * (etmp_val, etmp_ind4) buffer, drained into G via
 * ofmo_integ_add_fock(); *petmp_non_zero_eri returns the count
 * still buffered.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_ddds__(
	// parallelization
	const int *pnworkers, const int *pworkerid,
	// integral type data
	const int *pLa, const int *pLb, const int *pLc, const int *pLd,
	// basis set & cutoff table data
	const int shel_atm[], const int shel_ini[],
	const double atom_x[], const double atom_y[],
	const double atom_z[], const int leading_cs_pair[],
	const double csp_schwarz[],
	const int csp_ics[], const int csp_jcs[],
	const int csp_leading_ps_pair[],
	const double psp_zeta[], const double psp_dkps[],
	const double psp_xiza[],
	// state of the buffered direct method
	const long *petmp_max_nzeri, long *petmp_non_zero_eri,
	double etmp_val[], short int etmp_ind4[],
	const int *plast_ijcs, const int *plast_klcs,
	// density matrix & G-matrix data
	const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    double DDDS[6*6*6];		/* one (dd,ds) shell-quartet batch */
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    if ( last_ijcs != -1 ) {
	/* resume right after the previously processed (ij,kl) pair */
	ijcs = last_ijcs;
	klcs = last_klcs+1;
    } else {
	ijcs = leading_cs_pair[Lab] + workerid;
	klcs = klcs0;
    }
    max_nzeri-=6*6*6;	/* keep head-room for one full shell quartet */
    if ( nzeri>= max_nzeri ) {
	ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
	nzeri = 0;
    }
    nzeri4 = nzeri<<2;	/* etmp_ind4 stores 4 AO indices per buffered ERI */
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
	val_ab = csp_schwarz[ijcs];
	ics = csp_ics[ijcs];
	jcs = csp_jcs[ijcs];
	ijps0 = csp_leading_ps_pair[ijcs];
	nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
	iat = shel_atm[ics];
	jat = shel_atm[jcs];
	iao0 = shel_ini[ics];
	jao0 = shel_ini[jcs];
	A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
	B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
	for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
	for ( ; klcs<klcs1; klcs++ ) {
	    val_cd = csp_schwarz[klcs];
	    /* screening by the product of pair Schwarz-type bounds */
	    if ( val_ab*val_cd < eps_ps4 ) continue;
	    kcs = csp_ics[klcs];
	    lcs = csp_jcs[klcs];
	    float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
	    if ( dmax*val_ab*val_cd < eps_sch ) continue;
	    klps0 = csp_leading_ps_pair[klcs];
	    nklps = csp_leading_ps_pair[klcs+1]-klps0;
	    kat = shel_atm[kcs];
	    lat = shel_atm[lcs];
	    kao0 = shel_ini[kcs];
	    lao = shel_ini[lcs];
	    C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
	    D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
	    for ( i=0; i<3; i++ ) {
		AC[i] = A[i] - C[i];
		DC[i] = D[i] - C[i];
	    }
	    twoint_core_ddds__(
		    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
		    &psp_xiza[ijps0], BA,
		    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
		    &psp_xiza[klps0], DC, AC, DDDS );
	    for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
		for ( j=0, jao=jao0; j<6; j++, jao++ ) {
		    /* skip the upper triangle; advance ix past its 6 k values */
		    if ( jao>iao ) { ix += 6; continue; }
		    coe = ( iao==jao ? HALF : ONE );
		    for ( k=0, kao=kao0; k<6; k++, kao++, ix++ ) {
			if ( fabs(DDDS[ix]) > eps_eri ) {
			    double x;
			    x = coe * DDDS[ix];
			    etmp_val[nzeri] = x;
			    etmp_ind4[nzeri4+0] = iao;
			    etmp_ind4[nzeri4+1] = jao;
			    etmp_ind4[nzeri4+2] = kao;
			    etmp_ind4[nzeri4+3] = lao;
			    nzeri++;
			    nzeri4+=4;
			}	// if ( fabs );
		    }	// for ( kao )
		}	// for ( jao )
	    }	// for ( iao )
	    if ( nzeri >= max_nzeri ) {
		ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
			Ds, G );
		nzeri = nzeri4= 0;
	    }
	}	// for ( klcs );
	klcs = klcs0;	/* after the resumed ij pair, scan the full kl range again */
    }		// for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dd,dp)-type two-electron integrals and accumulates their
 * contribution to the G (Fock) matrix.
 * (Header translated from the original Japanese comment.)
 *
 * Worker `workerid` of `nworkers` processes every nworkers-th ij
 * contracted-shell pair. Integrals surviving the Schwarz screening are
 * buffered in etmp_val / etmp_ind4 and flushed into G through
 * ofmo_integ_add_fock() whenever the buffer is nearly full.
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dddp__(
        // parallelization: number of workers and this worker's id
        const int *pnworkers, const int *pworkerid,
        // integral type data: angular momenta of the four shells
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // concerned about buffered direct method
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    // nzeri counts buffered integral values; nzeri4 is the matching
    // position in the 4-index array (always nzeri*4)
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, l, ix;
    int ijcs, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    // one batch of (dd,dp) primitives: 6 d x 6 d x 6 d x 3 p components
    double DDDP[6*6*6*3];
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    // resume from the last (ijcs,klcs) position if a previous call
    // stopped partway; otherwise start at this worker's first pair
    if ( last_ijcs != -1 ) {
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full batch so the buffer can never overflow
    max_nzeri-=6*6*6*3;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics = csp_ics[ijcs];
        jcs = csp_jcs[ijcs];
        ijps0 = csp_leading_ps_pair[ijcs];
        nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat = shel_atm[ics];
        jat = shel_atm[jcs];
        iao0 = shel_ini[ics];
        jao0 = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        for ( ; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            // Schwarz screening on the bare integral bound
            if ( val_ab*val_cd < eps_ps4 ) continue;
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            // screening weighted by the largest relevant density element
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat = shel_atm[kcs];
            lat = shel_atm[lcs];
            kao0 = shel_ini[kcs];
            lao0 = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            // evaluate the whole primitive batch for this shell quartet
            twoint_core_dddp__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DDDP );
            for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
                for ( j=0, jao=jao0; j<6; j++, jao++ ) {
                    // keep only the canonical jao<=iao triangle
                    if ( jao>iao ) { ix += 6*3; continue; }
                    coe = ( iao==jao ? HALF : ONE );
                    for ( k=0, kao=kao0; k<6; k++, kao++ ) {
                        for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
                            if ( fabs(DDDP[ix]) > eps_eri ) {
                                double x;
                                x = coe * DDDP[ix];
                                etmp_val[nzeri] = x;
                                etmp_ind4[nzeri4+0] = iao;
                                etmp_ind4[nzeri4+1] = jao;
                                etmp_ind4[nzeri4+2] = kao;
                                etmp_ind4[nzeri4+3] = lao;
                                nzeri++;
                                nzeri4+=4;
                            } // if ( fabs );
                        }
                    } // for ( kao )
                } // for ( jao )
            } // for ( iao )
            // flush the buffer into G when it is nearly full
            if ( nzeri>= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
                nzeri = nzeri4 = 0;
            }
        } // for ( klcs );
        klcs = klcs0;
    } // for ( ijcs );
    // report how many integrals remain buffered for the caller
    *petmp_non_zero_eri = nzeri;
    return 0;
}
/** Computes (dd,dd)-type two-electron integrals and accumulates their
 * contribution to the G (Fock) matrix.
 * (Header translated from the original Japanese comment.)
 *
 * Same buffered-direct scheme as the other ofmo_twoint_direct_* kernels,
 * with the additional (ij|kl) <-> (kl|ij) symmetry exploited: only
 * canonical quartets with IJ >= KL are stored (coe folds in the 1/2
 * factors for coincident indices).
 *
 * @ingroup integ-twoint-direct
 * */
int ofmo_twoint_direct_dddd__(
        // parallelization: number of workers and this worker's id
        const int *pnworkers, const int *pworkerid,
        // integral type data: angular momenta of the four shells
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis set & cutoff table data
        const int shel_atm[], const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[], const int leading_cs_pair[],
        const double csp_schwarz[],
        const int csp_ics[], const int csp_jcs[],
        const int csp_leading_ps_pair[],
        const double psp_zeta[], const double psp_dkps[],
        const double psp_xiza[],
        // concerned about buffered direct method
        const long *petmp_max_nzeri, long *petmp_non_zero_eri,
        double etmp_val[], short int etmp_ind4[],
        const int *plast_ijcs, const int *plast_klcs,
        // density matrix & G-matrix data
        const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    // nzeri counts buffered values; nzeri4 is the 4-index position (nzeri*4)
    long nzeri4, nzeri=*petmp_non_zero_eri;
    int Lab, Lcd, i, j, k, l, ix, ipat;
    // I2/IJ and K2/KL are triangular (packed) indices used to compare
    // the canonical order of the bra and ket pairs
    int I2, IJ, K2, KL;
    int ijcs, ijcs1;
    int klcs, klcs0;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe, coe0;
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    // one batch of (dd,dd) primitives: 6^4 components
    double DDDD[6*6*6*6];
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    // resume from the last (ijcs,klcs) position if a previous call
    // stopped partway; otherwise start at this worker's first pair
    if ( last_ijcs != -1 ) {
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // reserve room for one full batch so the buffer can never overflow
    max_nzeri-=6*6*6*6;
    if ( nzeri>= max_nzeri ) {
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = 0;
    }
    nzeri4 = nzeri<<2;
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics = csp_ics[ijcs];
        jcs = csp_jcs[ijcs];
        ijps0 = csp_leading_ps_pair[ijcs];
        nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat = shel_atm[ics];
        jat = shel_atm[jcs];
        iao0 = shel_ini[ics];
        jao0 = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        // klcs runs only up to ijcs: (ij|kl) symmetry halves the work
        for ( ; klcs<=ijcs; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            // Schwarz screening on the bare integral bound
            if ( val_ab*val_cd < eps_ps4 ) continue;
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            // screening weighted by the largest relevant density element
            float dmax = ofmo_twoint_dmax6(ics,jcs,kcs,lcs);
            if ( dmax*val_ab*val_cd < eps_sch ) continue;
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat = shel_atm[kcs];
            lat = shel_atm[lcs];
            kao0 = shel_ini[kcs];
            lao0 = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            // evaluate the whole primitive batch for this shell quartet
            twoint_core_dddd__(
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DDDD == DDDD ? DC : DC, AC, DDDD );
            // ipat: keep the IJ<KL half too when the bra/ket shells
            // coincide in a way that would otherwise drop quartets
            ipat = ( (ics==kcs && jcs>lcs) ? true : false );
            for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
                I2 = (iao*iao+iao)>>1;
                for ( j=0, jao=jao0; j<6; j++, jao++ ) {
                    // keep only the canonical jao<=iao triangle
                    if ( jao>iao ) { ix+=6*6; continue; }
                    IJ = I2 + jao;
                    coe0 = ( iao==jao ? HALF : ONE );
                    for ( k=0, kao=kao0; k<6; k++, kao++ ) {
                        K2 = (kao*kao+kao)>>1;
                        for ( l=0, lao=lao0; l<6; l++, lao++, ix++ ) {
                            if ( lao>kao ) continue;
                            if ( fabs(DDDD[ix]) > eps_eri ) {
                                double x;
                                KL = K2 + lao;
                                if ( IJ >= KL || ipat ) {
                                    coe = coe0;
                                    if ( kao==lao ) coe *= HALF;
                                    if ( KL == IJ ) coe *= HALF;
                                    x = coe * DDDD[ix];
                                    etmp_val[nzeri] = x;
                                    etmp_ind4[nzeri4+0] = iao;
                                    etmp_ind4[nzeri4+1] = jao;
                                    etmp_ind4[nzeri4+2] = kao;
                                    etmp_ind4[nzeri4+3] = lao;
                                    nzeri++;
                                    nzeri4+=4;
                                }
                            } // if ( fabs )
                        } // for ( lao )
                    } // for ( kao )
                } // for ( jao )
            } // for ( iao )
            // flush the buffer into G when it is nearly full
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4 = 0;
            }
        } // for ( klcs );
        klcs = klcs0;
    } // for ( ijcs );
    // report how many integrals remain buffered for the caller
    *petmp_non_zero_eri = nzeri;
    return 0;
}
|
DepthImageBasedGeometryApproximation.h | /// \ingroup base
/// \class ttk::DepthImageBasedGeometryApproximation
/// \author Jonas Lukasczyk <jl@jluk.de>
/// \date 1.7.2018
///
/// \brief TTK %depthImageBasedGeometryApproximation processing package.
///
/// %DepthImageBasedGeometryApproximation is a TTK processing package that approximates geometry based on an input depth image and its corresponding camera parameters.
///
/// Related publication:
/// 'VOIDGA: A View-Approximation Oriented Image Database Generation Approach'
/// Jonas Lukasczyk, Eric Kinner, James Ahrens, Heike Leitte, and Christoph Garth.
/// IEEE 8th Symposium on Large Data Analysis and Visualization (LDAV), 2018.
///
/// \sa ttk::Triangulation
#pragma once
// base code includes
#include <Wrapper.h>
using namespace std;
namespace ttk{
    // Approximates 3D geometry from a depth image plus camera parameters
    // (see the file header for the related VOIDGA publication).
    class DepthImageBasedGeometryApproximation : public Debug{
        public:
            DepthImageBasedGeometryApproximation(){};
            ~DepthImageBasedGeometryApproximation(){};
            // Execute the geometry approximation.
            // depthValues: per-pixel depth in [0,1]; values > 0.99 are background
            // camPos/camDir/camUp: camera position and orientation (3 doubles each)
            // camRes: image resolution (width, height); camNearFar: near/far planes
            // camHeight: world-space height of the view frustum
            // subsampling: pixels skipped between samples (0 = every pixel)
            // Outputs: pixel indices ("indicies", sic), vertex positions,
            // triangle connectivity, and per-triangle depth distortion.
            template <class dataType> int execute(
                dataType* depthValues,
                double* camPos,
                double* camDir,
                double* camUp,
                double* camRes,
                double* camNearFar,
                double* camHeight,
                int subsampling,
                vector<size_t>& indicies,
                vector<tuple<double,double,double>>& vertices,
                vector<tuple<int,int,int>>& triangles,
                vector<double>& triangleDistortions
            ) const;
    };
}
// Back-projects each foreground depth-image pixel into world space and
// stitches neighboring samples into triangles (two per pixel quad).
// Returns 0 on success. See the class declaration for parameter semantics.
template <class dataType> int ttk::DepthImageBasedGeometryApproximation::execute(
    dataType* depthValues,
    double* camPos,
    double* camDir,
    double* camUp,
    double* camRes,
    double* camNearFar,
    double* camHeight,
    int subsampling,
    vector<size_t>& indicies,
    vector<tuple<double,double,double>>& vertices,
    vector<tuple<int,int,int>>& triangles,
    vector<double>& triangleDistortions
) const{
    Timer t;
    // stride between sampled pixels (subsampling == 0 -> every pixel)
    size_t step = subsampling + 1;
    size_t camResST[2] = {(size_t) camRes[0], (size_t) camRes[1]};
    // -------------------------------------------------------------------------
    // Compute Camera Vectors
    // -------------------------------------------------------------------------
    // Compute camera size (world units), preserving the image aspect ratio
    double camSize[2] = {
        camRes[0]/camRes[1]*camHeight[0],
        camHeight[0]
    };
    // Compute camRight = camDir x CamUp
    double camRight[3] = {
        camDir[1]*camUp[2] - camDir[2]*camUp[1],
        camDir[2]*camUp[0] - camDir[0]*camUp[2],
        camDir[0]*camUp[1] - camDir[1]*camUp[0]
    };
    double temp = sqrt( camRight[0]*camRight[0] + camRight[1]*camRight[1] + camRight[2]*camRight[2] );
    camRight[0]/=temp;
    camRight[1]/=temp;
    camRight[2]/=temp;
    // Compute true up vector (camDir x -camRight), orthogonal to both
    double camUpTrue[3] = {
        camDir[1]*(-camRight[2]) - camDir[2]*(-camRight[1]),
        camDir[2]*(-camRight[0]) - camDir[0]*(-camRight[2]),
        camDir[0]*(-camRight[1]) - camDir[1]*(-camRight[0])
    };
    temp = sqrt( camUpTrue[0]*camUpTrue[0] + camUpTrue[1]*camUpTrue[1] + camUpTrue[2]*camUpTrue[2] );
    camUpTrue[0]/=temp;
    camUpTrue[1]/=temp;
    camUpTrue[2]/=temp;
    // Compute Index Map: pixel index -> vertex index (-1 for background,
    // i.e. depth > 0.99)
    size_t n = camResST[0] * camResST[1];
    vector<int> pixelIndexVertexIndexMap;
    pixelIndexVertexIndexMap.resize( n );
    size_t numberNewVertices = 0;
    {
        for(size_t i=0; i<n; i++)
            pixelIndexVertexIndexMap[i] = depthValues[i] > 0.99 ? -1 : numberNewVertices++;
    }
    // NOTE(review): every foreground pixel gets a vertex index here, but the
    // vertex loop below only visits every `step`-th pixel — with
    // subsampling > 0 the skipped vertices stay default-initialized.
    // TODO confirm this is intended for subsampling > 0.
    // -------------------------------------------------------------------------
    // Create Vertices
    // -------------------------------------------------------------------------
    {
        // Compute pixel size in world coordinates
        double pixelWidthWorld = camSize[0]/camRes[0];
        double pixelHeightWorld = camSize[1]/camRes[1];
        // Optimization: precompute half of the camera size to reduce the number of operations in the for loop
        // Include a half pixel offset (-0.5) to center vertices at pixel centers
        double camWidthWorldHalf = 0.5*camSize[0] - 0.5*pixelWidthWorld;
        double camHeightWorldHalf = 0.5*camSize[1] - 0.5*pixelHeightWorld;
        // Make room for new vertices
        vertices.resize( numberNewVertices );
        indicies.resize( numberNewVertices );
        // Compute depth delta (far - near); depth values are remapped into it
        double delta = camNearFar[1]-camNearFar[0];
        // Optimization: reorient camera model to bottom left corner to reduce operations in for loop
        double camPosCorner[3] = {
            camPos[0] - camRight[0]*camWidthWorldHalf - camUpTrue[0]*camHeightWorldHalf,
            camPos[1] - camRight[1]*camWidthWorldHalf - camUpTrue[1]*camHeightWorldHalf,
            camPos[2] - camRight[2]*camWidthWorldHalf - camUpTrue[2]*camHeightWorldHalf
        };
        // Compute vertex positions and parallelize over rows
        #ifdef TTK_ENABLE_OPENMP
        #pragma omp parallel for num_threads(threadNumber_)
        #endif
        for(size_t y=0; y<camResST[1]; y+=step){
            double v = ((double)y)*pixelHeightWorld;
            // row-invariant part of the up-vector displacement
            double vTimesUp[3] = {
                v*camUpTrue[0],
                v*camUpTrue[1],
                v*camUpTrue[2]
            };
            size_t yOffset = y*camResST[0];
            for(size_t x=0; x<camResST[0]; x+=step){
                size_t pixelIndex = x + yOffset;
                int vertexIndex = pixelIndexVertexIndexMap[ pixelIndex ];
                if(vertexIndex < 0) continue;
                // map normalized depth into [near, far]
                double d = ((double)depthValues[pixelIndex]) * delta + camNearFar[0];
                double u = ((double)x)*pixelWidthWorld;
                auto& vertex = vertices[vertexIndex];
                // Store pixel index of vertex
                indicies[ vertexIndex ] = pixelIndex;
                // world position = corner + u*right + v*up + d*viewDir
                get<0>(vertex) = camPosCorner[0] + u*camRight[0] + vTimesUp[0] + d*camDir[0];
                get<1>(vertex) = camPosCorner[1] + u*camRight[1] + vTimesUp[1] + d*camDir[1];
                get<2>(vertex) = camPosCorner[2] + u*camRight[2] + vTimesUp[2] + d*camDir[2];
            }
        }
    }
    // -------------------------------------------------------------------------
    // Create Triangles
    // -------------------------------------------------------------------------
    {
        // |a - b| without relying on signed arithmetic (dataType may be unsigned)
        auto absDiff = [](dataType a, dataType b){
            return a>b ? a-b : b-a;
        };
        /* Index Structure:
        0 - 1
        | / |
        2 - 3
        */
        size_t xl = camResST[0]-step;
        size_t yl = camResST[1]-step;
        size_t yD = step*camResST[0];
        for(size_t y=0; y<yl; y+=step){
            for(size_t x=0; x<xl; x+=step){
                size_t i0 = x + y*camResST[0];
                size_t i1 = i0 + step;
                size_t i2 = i0 + yD;
                size_t i3 = i2 + step;
                int i0Index = pixelIndexVertexIndexMap[i0];
                int i1Index = pixelIndexVertexIndexMap[i1];
                int i2Index = pixelIndexVertexIndexMap[i2];
                int i3Index = pixelIndexVertexIndexMap[i3];
                dataType i0Depth = depthValues[i0];
                dataType i1Depth = depthValues[i1];
                dataType i2Depth = depthValues[i2];
                dataType i3Depth = depthValues[i3];
                // both triangles share the 1-2 diagonal, so corners 1 and 2
                // must be foreground for either triangle to exist
                if(pixelIndexVertexIndexMap[i1]>=0 && pixelIndexVertexIndexMap[i2]>=0){
                    // Check first triangle (0,1,2)
                    if(pixelIndexVertexIndexMap[i0]>=0){
                        triangles.push_back( make_tuple( i0Index, i1Index, i2Index ) );
                        // distortion = largest depth jump along any edge
                        dataType distortion = max(
                            absDiff(i0Depth,i1Depth),
                            max( absDiff(i1Depth,i2Depth), absDiff(i0Depth,i2Depth) )
                        );
                        triangleDistortions.push_back( distortion );
                    }
                    // Check second triangle (1,3,2)
                    if(pixelIndexVertexIndexMap[i3]>=0){
                        triangles.push_back( make_tuple( i1Index, i3Index, i2Index ) );
                        dataType distortion = max(
                            absDiff(i3Depth,i1Depth),
                            max( absDiff(i1Depth,i2Depth), absDiff(i3Depth,i2Depth) )
                        );
                        triangleDistortions.push_back( distortion );
                    }
                }
            }
        }
    }
    // Print performance
    {
        stringstream msg;
        msg << "[ttkDepthImageBasedGeometryApproximation] Depth Image ("<<camResST[0]<<"x"<<camResST[1]<<":"<<step<<") processed in " << t.getElapsedTime() << " s. (" << threadNumber_ << " thread(s))." << endl;
        dMsg(cout, msg.str(), timeMsg);
    }
    {
        stringstream msg;
        msg << "[ttkDepthImageBasedGeometryApproximation] Generated (" << vertices.size() << " vertices) and (" << triangles.size() << " triangles)." << endl;
        dMsg(cout, msg.str(), infoMsg);
    }
    return 0;
}
|
dgetrs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgetrs.c, normal z -> d, Fri Sep 28 17:38:06 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
******************************************************************************/
/***************************************************************************//**
 *
 * Solves A*X = B using the LU factors of A: applies the row pivots in ipiv
 * to B, then solves the lower- and upper-triangular systems.
 *
 * @param[in]     n     Order of the matrix A (n >= 0).
 * @param[in]     nrhs  Number of right-hand sides (nrhs >= 0).
 * @param[in]     pA    LU factors of A in LAPACK layout (lda-by-n).
 * @param[in]     lda   Leading dimension of pA (lda >= max(1,n)).
 * @param[in]     ipiv  Pivot indices from the factorization.
 * @param[in,out] pB    On entry the right-hand sides (ldb-by-nrhs);
 *                      on exit the solution X.
 * @param[in]     ldb   Leading dimension of pB (ldb >= max(1,n)).
 *
 * @retval PlasmaSuccess on success; a negative value for an illegal
 *         argument; otherwise the error code of the failing step.
 ******************************************************************************/
int plasma_dgetrs(int n, int nrhs,
                  double *pA, int lda, int *ipiv,
                  double *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }
    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaRealDouble, n, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // Fix: release the already-created descriptor A to avoid a leak.
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence (return value was previously ignored).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize request (return value was previously ignored).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
        // Call the tile async function.
        plasma_omp_dgetrs(A, ipiv, B, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
    }
    // Free matrices A and B in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
 *
 * Asynchronous, tile-layout variant of plasma_dgetrs: applies the row
 * swaps from ipiv to B, then solves the lower- and upper-triangular
 * systems with A's LU factors. Errors are reported through the sequence.
 *
 * @param[in]     A        LU factors in tile layout.
 * @param[in]     ipiv     Pivot indices from the factorization.
 * @param[in,out] B        Right-hand sides on entry; solution on exit.
 * @param[in,out] sequence Sequence the tasks belong to (must not be NULL).
 * @param[in,out] request  Request for error reporting (must not be NULL).
 ******************************************************************************/
void plasma_omp_dgetrs(plasma_desc_t A, int *ipiv,
                       plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Fix: validate sequence and request FIRST; the original code passed a
    // possibly-NULL sequence to plasma_request_fail() in the checks below.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    // quick return
    if (A.n == 0 || B.n == 0)
        return;
    // Call the parallel functions: row swaps, then L solve, then U solve.
    plasma_pdgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
    plasma_pdtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A,
                       B,
                  sequence, request);
    plasma_pdtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
|
GB_binop__ne_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint64)
// A*D function (colscale): GB (_AxD__ne_uint64)
// D*A function (rowscale): GB (_DxB__ne_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint64)
// C=scalar+B GB (_bind1st__ne_uint64)
// C=scalar+B' GB (_bind1st_tran__ne_uint64)
// C=A+scalar GB (_bind2nd__ne_uint64)
// C=A'+scalar GB (_bind2nd_tran__ne_uint64)
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT64 || GxB_NO_NE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix. The template
// body is compiled out (#if 0) because NE is not a valid dense-accum
// operator, so this generated kernel is intentionally a no-op stub.
GrB_Info GB (_Cdense_accumB__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix. As with accumB above,
// the template body is compiled out (#if 0): NE is not a valid dense-accum
// operator, so this generated kernel is intentionally a no-op stub.
GrB_Info GB (_Cdense_accumb__ne_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling the columns of A by the diagonal matrix D, with
// cij = (aij != djj). The loop comes from the colscale template.
GrB_Info GB (_AxD__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling the rows of B by the diagonal matrix D, with
// cij = (dii != bij). The loop comes from the rowscale template.
GrB_Info GB (_DxB__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the set union of the patterns of A
// and B, with cij = (aij != bij) where both are present. Work is split
// across the precomputed TaskList; the ek_slicing workspaces are declared
// here and freed by GB_FREE_WORK after the included template runs.
GrB_Info GB (_AaddB__ne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the set intersection of the
// patterns of A and B, with cij = (aij != bij). Body from the emult_01
// meta-template.
GrB_Info GB (_AemultB_01__ne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for NE (it is commutative), so only the unflipped
// template branch is compiled in.
GrB_Info GB (_AemultB_02__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full. Body from the emult_03 template.
GrB_Info GB (_AemultB_03__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held
// in bitmap form. Body from the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__ne_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x != bij) to every entry present in B,
// with the scalar x bound to the first operand.
GrB_Info GB (_bind1st__ne_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // positions absent from the bitmap Bb are left untouched
        if (!GBB (Bb, p)) continue ;
        Cx [p] = (x != GBX (Bx, p, false)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij != y) to every entry present in A,
// with the scalar y bound to the second operand.
GrB_Info GB (_bind2nd__ne_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint64_t y = (*((uint64_t *) y_input)) ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // positions absent from the bitmap Ab are left untouched
        if (!GBB (Ab, p)) continue ;
        Cx [p] = (GBX (Ax, p, false) != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x != aij), with the scalar x
// bound to the first operand. The transpose loop comes from
// GB_unop_transpose.c, specialized by GB_CAST_OP above.
GrB_Info GB (_bind1st_tran__ne_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (generated boilerplate)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply z = (aij != y), with the scalar y
// bound to the second operand. The transpose loop comes from
// GB_unop_transpose.c, specialized by GB_CAST_OP above.
GrB_Info GB (_bind2nd_tran__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ten_tusscher_2004_epi_S1_13.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1_13.h"
// Fills the generic cell-model descriptor with this model's constants;
// each field is written only when the corresponding flag asks for it.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Loads Elnaz's precomputed steady-state values into the state vector sv
// (these replace the original Ten Tusscher default initial conditions:
// V, M, H, J, Xr1, Xr2, Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    static const real steady_state[] = {
        -86.6124649455479,      // V (millivolt)
        0.00127794843601506,    // M
        0.780792340373146,      // H
        0.780621416430051,      // J
        0.000173708999922449,   // Xr1
        0.485550218750528,      // Xr2
        0.00293105929439211,    // Xs
        0.999998362618276,      // S
        1.91930561072561e-08,   // R
        1.87995323300123e-05,   // D
        0.999771109571080,      // F
        1.00717919024407,       // FCa
        0.999996460509174,      // G
        4.32012539733253e-05,   // Cai
        0.681225232256513,      // CaSR
        9.64639490171753,       // Nai
        139.835052468258        // Ki
    };
    for (uint32_t eq = 0; eq < NEQ; eq++)
        sv[eq] = steady_state[eq];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    // Advance every requested cell by num_steps sub-steps of size dt.
    // Cells are independent, so the outer loop is parallelised.
    int i;
    #pragma omp parallel for
    for (i = 0; i < num_cells_to_solve; i++) {
        // With an explicit cell list, map the work index to the cell id;
        // otherwise cells are laid out contiguously.
        uint32_t sv_id = cells_to_solve ? cells_to_solve[i] : (uint32_t) i;
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Take one time step of size dt for a single cell: snapshot the state,
// evaluate the model update, then write the new state back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ], next[NEQ];
    for (int k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }
    // RHS_cpu writes the updated state (not derivatives) into 'next'.
    RHS_cpu(state, next, stim_current, dt);
    for (int k = NEQ - 1; k >= 0; k--) {
        sv[k] = next[k];
    }
}
// One explicit time step of the ten Tusscher et al. 2004 human ventricular
// (epicardial) myocyte model, 17 state variables.
//
//   sv           in:  current state; sv[0] = membrane voltage [mV], gates and
//                     ionic concentrations follow (see unpacking below)
//   rDY_         out: the NEXT state, not time derivatives -- gates are
//                     advanced with the Rush-Larsen exponential scheme and
//                     voltage/concentrations with forward Euler (see the
//                     rDY_ assignments at the end)
//   stim_current in:  applied stimulus current
//   dt           in:  time step (used directly in the in-function integration)
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter set (variant S1_13): overrides the baseline conductances
// and flux rates above.  Ordering matches the assignments that follow.
real parameters []={13.9878532723791,0.000381859667002297,0.000164377584172001,0.000429492452900182,0.281717671480526,0.172178664836313,0.158009524014960,3.54321400489854,0.0185670643252902,2.13545487708985,1099.99990980037,0.000491721845343899,0.419354711210666,0.0199628106883488,0.00141401145930471,3.06197556760024e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Gate decay factors for fCa and g, precomputed once per step (Rush-Larsen).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (reversal potentials via Nernst equation)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (forward Euler, in place)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Analytic solution of the quadratic buffering equations for CaSR and Cai.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen exponential integration: gate -> INF-(INF-gate)*exp(-dt/TAU).
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only decrease while the cell is depolarised (> -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// NOTE: rDY_ holds the NEXT state (forward Euler for V), not derivatives.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
fvmpor.h | #ifndef FVMPOR_H
#define FVMPOR_H
//#define USE_PRINT
#ifdef USE_PRINT
#define PRINT(fid,v) fid << #v << std::endl; \
for(int kkkk=0; kkkk<v.dim(); kkkk++ )\
fid << v[kkkk] << " ";\
fid << std::endl;
#else
#define PRINT(fid,v) ;
#endif
#include "definitions.h"
#include "shape.h"
#include <cublas.h>
#include <lin/impl/rebind.h>
#include <lin/lin.h>
#include <fvm/fvm.h>
#include <fvm/mesh.h>
//#include <fvm/solver.h>
#include <fvm/solver_compact.h>
#include <fvm/physics_base.h>
#include <util/intvector.h>
#include <util/interpolation.h>
#include <util/dimvector.h>
#include <util/timer.h>
#include <mkl_spblas.h>
#include <mkl_service.h>
#include <omp.h>
#include <vector>
#include <memory>
#include <map>
#include <iostream>
namespace fvmpor {
// Compile-time trait: does the given coordinator type execute on the GPU?
// Primary template answers "host"; the specialisation for the GPU integer
// coordinator answers "device".  Used below to select CPU vs GPU code paths.
template <typename T>
struct CoordTraits_{
static bool is_device() {return false;};
};
template <>
struct CoordTraits_<lin::gpu::Coordinator<int> >{
static bool is_device() {return true;};
};
// Spatial weighting schemes for face quantities (see process_spatial_weights):
// upwinding, simple averaging, or van Leer flux limiting.
enum SpatialWeightType {weightUpwind, weightAveraging, weightVanLeer};
using lin::all;
// Computational core for variably-saturated ("VarSat") flow physics,
// templated on a host and a device coordinator so the same code can run on
// CPU or GPU.  Holds physical-zone / boundary-condition tables, per-zone
// sub-control-volume (SCV) index and weight maps, and all node/edge/face
// work vectors; members are annotated HOST/DEVICE by where they live.
template <typename CoordHost, typename CoordDevice>
class VarSatPhysicsImpl{
public:
typedef typename lin::rebind<CoordHost, double>::type CoordHostDouble;
typedef typename lin::rebind<CoordHost, int>::type CoordHostInt;
typedef typename lin::rebind<CoordDevice, double>::type CoordDeviceDouble;
typedef typename lin::rebind<CoordDevice, int>::type CoordDeviceInt;
typedef lin::Vector<double, CoordHostDouble> TVec;
typedef lin::Vector<int, CoordHostInt> TIndexVec;
typedef lin::Vector<double, CoordDeviceDouble> TVecDevice;
typedef lin::Vector<int, CoordDeviceInt> TIndexVecDevice;
typedef util::InterpolationMatrix<CoordDevice> InterpolationMatrix;
typedef util::DimVector<TVecDevice> DimVector;
protected:
typedef mesh::Point Point;
// computation during a timestep
void process_faces_lim( const mesh::Mesh &m );
void process_faces_shape( const mesh::Mesh &m );
void process_volumes_psk( const mesh::Mesh &m );
void process_derivative_coefficients( const mesh::Mesh &m );
void process_fluxes( double t, const mesh::Mesh &m );
void process_spatial_weights(const mesh::Mesh& m);
// physical zones
const PhysicalZone& physical_zone( int ) const;
int physical_zones() const;
// boundary conditions
int boundary_conditions() const { return boundary_conditions_h_.size(); };
const BoundaryCondition& boundary_condition_h( int ) const;
const Constants& constants() const { return constants_; };
////////////////////////////////
// routines for setting up
///////////////////////////////
void set_physical_zones();
void set_boundary_conditions();
void initialise_vectors( const mesh::Mesh &m );
void set_initial_conditions( double &t, const mesh::Mesh& m );
void set_constants();
void initialise_shape_functions(const mesh::Mesh& m);
// physics specific
// van Genuchten-style saturation relations: fills Sw, dSw/dh and krw from h.
void saturation( TVecDevice& h, const PhysicalZone &props, TVecDevice &Sw, TVecDevice &dSw, TVecDevice &krw );
// communicator for global communication of doubles on the nodes
mpi::Communicator<double> node_comm_;
// physical definitions
int dimension;
std::vector<PhysicalZone> physical_zones_;
std::map<int,BoundaryCondition> boundary_conditions_h_;
Constants constants_;
// tags whether a node is dirichlet
TIndexVec is_dirichlet_h_vec_; // HOST
TIndexVecDevice dirichlet_nodes_; // DEVICE
TVecDevice h_dirichlet_; // DEVICE
// spatial weighting
int CV_flux_comm_tag;
SpatialWeightType spatial_weighting;
TIndexVecDevice CV_up; // DEVICE
TVecDevice CV_flux; // DEVICE
TIndexVecDevice edge_up; // DEVICE
TIndexVecDevice edge_down; // DEVICE
TVecDevice edge_flux; // DEVICE
// derived quantities, one vector per physical zone (SCV-mapped)
std::vector<TVecDevice> head_scv; // DEVICE
std::vector<TVecDevice> phi_scv; // DEVICE
std::vector<TVecDevice> dphi_scv; // DEVICE
//std::vector<TVecDevice> Se_scv; // DEVICE
std::vector<TVecDevice> Sw_scv; // DEVICE
std::vector<TVecDevice> theta_scv; // DEVICE
std::vector<TVecDevice> dSw_scv; // DEVICE
std::vector<TVecDevice> krw_scv; // DEVICE
std::vector<TIndexVecDevice> index_scv; // DEVICE
std::vector<TVecDevice> weight_scv; // DEVICE
// maps a mesh physical tag to a dense zone index [0, num_zones)
std::map<int, int> zones_map_;
// spatial weighting for CV faces
std::vector<TIndexVecDevice> n_front_; // DEVICE
std::vector<TIndexVecDevice> n_back_; // DEVICE
std::vector<TIndexVecDevice> p_front_; // DEVICE
std::vector<TIndexVecDevice> q_front_; // DEVICE
std::vector<TIndexVecDevice> p_back_; // DEVICE
std::vector<TIndexVecDevice> q_back_; // DEVICE
TVecDevice edge_weight_front_; // DEVICE
TVecDevice edge_weight_back_; // DEVICE
TIndexVecDevice edge_node_front_; // DEVICE
TIndexVecDevice edge_node_back_; // DEVICE
// stores list of nodes on seepage faces
//TIndexVec seepage_nodes;
//int seepage_tag; // unique tag for the seepage BC
// for interpolation from nodes to CV faces
InterpolationMatrix shape_matrix;
InterpolationMatrix shape_gradient_matrixX;
InterpolationMatrix shape_gradient_matrixY;
InterpolationMatrix shape_gradient_matrixZ;
InterpolationMatrix flux_lim_matrix;
InterpolationMatrix cvflux_matrix;
InterpolationMatrix dirichlet_matrix;
TVecDevice h_vec; // head at the nodes // DEVICE
TVecDevice hp_vec_; // head derivative at the nodes // DEVICE
TVecDevice M_vec_; // M at the nodes // DEVICE
TVecDevice Mp_vec_; // M derivative at the nodes // DEVICE
//TVecDevice res_tmp;
DimVector grad_h_faces_; // head gradient at CV faces // DEVICE
TVecDevice h_faces; // head at CV faces // DEVICE
TVecDevice M_flux_faces; // mass flux at CV faces // DEVICE
TVecDevice qdotn_faces; // volumetric fluid flux at CV faces // DEVICE
// storing derived quantities averaged for each control volume
TVecDevice rho_vec, Sw_vec, dSw_vec, theta_vec; // DEVICE
TVecDevice phi_vec, dphi_vec; // DEVICE
// storing derived quantities at cv faces (using c and h values at faces)
TVecDevice rho_faces; // DEVICE
// storing upwinded/flux limitted values at cv faces
TVecDevice rho_faces_lim, krw_faces_lim; // DEVICE
// storing coefficients for derivative terms
TVecDevice ahh_vec; // DEVICE
// storing values at faces
DimVector K_faces_; // DEVICE
DimVector norm_faces_; // DEVICE
DimVector qsat_faces_; // DEVICE HOST
};
// Public physics class plugged into the FVM framework: combines the generic
// fvm::PhysicsBase interface (CRTP on this class) with the VarSatPhysicsImpl
// computational core above.  Implements the flux / lhs / residual callbacks
// the solver requires; num_calls is a call counter exposed via calls().
template <typename value_type, typename CoordHost, typename CoordDevice>
class VarSatPhysics :
public fvm::PhysicsBase< VarSatPhysics<value_type, CoordHost, CoordDevice>,
value_type>,
public VarSatPhysicsImpl<CoordHost,CoordDevice>
{
typedef fvm::PhysicsBase<VarSatPhysics, value_type> base;
typedef VarSatPhysicsImpl<CoordHost,CoordDevice> impl;
int num_calls;
friend class Preconditioner;
typename impl::TVecDevice res_tmp;
typename impl::TVec res_tmp_host;
public:
typedef typename base::iterator iterator;
typedef typename base::const_iterator const_iterator;
typedef typename base::Callback Callback;
//VarSatPhysics(const mesh::Mesh &m) : num_calls(0), res_tmp(TVec(value_type::variables*m.local_nodes())) {};
VarSatPhysics() : num_calls(0) {};
int calls() const { return num_calls; }
/////////////////////////////////
// GLOBAL
/////////////////////////////////
value_type flux(double t, const mesh::CVFace& cvf, const_iterator sol) const;
value_type boundary_flux(double t, const mesh::CVFace& cvf, const_iterator sol) const;
double compute_mass(const mesh::Mesh& m, const_iterator u);
double mass_flux_per_time(const mesh::Mesh& m);
/////////////////////////////////
// VARIABLE-SPECIFIC
/////////////////////////////////
void initialise( double& t, const mesh::Mesh& m, iterator u,
iterator udash, iterator temp, Callback);
void preprocess_evaluation( double t, const mesh::Mesh& m,
const_iterator u, const_iterator udash);
void preprocess_timestep( double t, const mesh::Mesh& m,
const_iterator sol, const_iterator deriv);
value_type lhs( double t, const mesh::Volume& volume,
const_iterator u, const_iterator udash) const;
void residual_evaluation( double t, const mesh::Mesh& m,
const_iterator sol, const_iterator deriv, iterator res);
value_type dirichlet(double t, const mesh::Node& n) const;
};
// **************************************************************************
// * IMPLEMENTATION *
// **************************************************************************
using mesh::Point;
// rho(h): fluid density as a linear function of pressure head.
//   compressible (beta != 0): rho = rho_0 + rho_0^2 * g * beta * h
//   incompressible:           rho = rho_0 everywhere
template <typename TVec>
void density(TVec& h, TVec& rho, const Constants& constants)
{
    const double beta  = constants.beta();
    const double rho_0 = constants.rho_0();
    const double g     = constants.g();
    if (!beta) {
        // incompressible fluid: constant reference density
        rho(all) = rho_0;
        return;
    }
    rho = h;
    rho *= rho_0 * rho_0 * g * beta;
    rho += rho_0;
}
// phi(h): porosity as a (linearised) function of pressure head, with its
// derivative dphi = d(phi)/dh.
//   rigid matrix (alpha == 0): phi = phi_0, dphi = 0
//   compressible matrix:       phi = 1 + (phi_0-1)*rho_0*g*alpha * h,
//                              dphi = (phi_0-1)*rho_0*g*alpha
template <typename TVec>
void porosity( TVec& h,
               TVec& phi,
               TVec& dphi,
               const PhysicalZone& props,
               const Constants& constants)
{
    double g = constants.g();
    double rho_0 = constants.rho_0();
    double phi_0 = props.phi;
    double alpha = props.alpha;
    // porosity
    if(alpha==0.){
        phi(all) = phi_0;
        dphi.zero();
    }
    else{
        double factor = (phi_0-1.)*rho_0*g*alpha;
        phi(all) = 1.;
        phi(all) += factor * h;
        // BUG FIX: dphi was previously left unset on this branch (only the
        // alpha==0 branch wrote it), leaving stale values in the output.
        // The derivative of the phi expression above w.r.t. h is 'factor'.
        dphi(all) = factor;
    }
}
// Evaluate the saturation relations at each pressure head in h:
//   Sw  - water saturation,  dSw - dSw/dh,  krw - relative permeability.
// NOTE(review): the algebra below matches a van Genuchten-Mualem style model
// with parameters (alphaVG, nVG, mVG, S_r) -- confirm against project docs.
// On a GPU coordinator the work is delegated to a dedicated kernel; the CPU
// path computes everything in place using vector operations (comments track
// the running contents of each vector).  Saturated nodes (h >= 0) are
// overridden to Sw = 1, dSw = 0, krw = 1 at the end.
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::saturation(
TVecDevice& h,
const PhysicalZone &props,
TVecDevice &Sw,
TVecDevice &dSw,
TVecDevice &krw )
{
double alphaVG = props.alphaVG;
double nVG = props.nVG;
double mVG = props.mVG;
double S_r = props.S_r;
double phi = props.phi;
if( CoordTraits_<CoordDeviceInt>::is_device() ){
const double *h_ptr = h.data();
double *dSw_ptr = dSw.data();
double *Sw_ptr = Sw.data();
double *krw_ptr = krw.data();
lin::gpu::saturation( h_ptr, Sw_ptr, dSw_ptr, krw_ptr,
h.dim(), alphaVG, nVG, mVG, S_r, phi);
}
else{
// if a = (alpha*|h|)^n, and b = 1+a
// set dSw = a
dSw(all) = -alphaVG*h;
dSw.pow(nVG);
// Sw = 1/b
Sw(all) = dSw+1.;
krw(all) = -1.;
krw /= Sw;
// dSw /= b
dSw /= Sw;
// Sw = 1/(b^m)
// this is the final value for Sw
Sw.pow(-mVG);
// find dSw
dSw *= Sw;
dSw /= h;
dSw *= -(1-S_r)*(nVG-1);
// find krw
krw += 1.;
krw.pow(mVG);
krw -= 1.;
krw.pow(2);
krw(all) *= sqrt(Sw);
Sw *= (1-S_r);
Sw += S_r;
// now override values for saturated h
int n=h.dim();
for(int i=0; i<n; i++){
if(h.at(i)>=0.){
dSw.at(i) = 0.;
Sw.at(i) = 1.;
krw.at(i) = 1.;
}
}
}
}
// One-time setup from the mesh: selects and initialises the GPU (when running
// on a device coordinator), allocates all node/edge/face work vectors, tags
// Dirichlet nodes and caches their (time-invariant) prescribed heads, builds
// the per-zone SCV index/weight tables and the edge->face scatter maps used
// for flux limiting, and precomputes face normals and the (negated)
// conductivity components.  Debug dumps go to initialiseGPU/CPU.txt via PRINT.
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::initialise_vectors( const mesh::Mesh &m ){
std::ofstream fid;
dimension = m.dim();
std::cout << "mesh has " << m.nodes() << " nodes " << m.elements() << " elements and " << m.cvfaces() << " CV faces" << std::endl;
std::cout << "with faces internal " << m.interior_cvfaces() << std::endl;
node_comm_.set_pattern( "NP_double", m.node_pattern() );
//sort out omp thread affinity
// if we are expected to use a GPU ensure that the CUBLAS
// library has been initialised
// This also ensures that the device is setup correctly
if(CoordTraits_<CoordDeviceInt>::is_device()){
fid.open("initialiseGPU.txt");
std::cout << "intialising cublas" << std::endl;
int num_devices = lin::gpu::num_devices();
int num_processes = m.mpicomm()->size();
int this_process = m.mpicomm()->rank();
// one MPI rank per GPU device, rank i -> device i
assert(num_processes<=num_devices);
lin::gpu::set_device(this_process);
std::string device_name = lin::gpu::get_device_name();
*(m.mpicomm()) << "===============================" << std::endl
<< "using GPU device " << this_process << " (" << device_name << ")" << std::endl
<< "===============================" << std::endl;
assert( cublasInit() == CUBLAS_STATUS_SUCCESS );
}
else
fid.open("initialiseCPU.txt");
// set physical properties
set_constants();
set_physical_zones();
set_boundary_conditions();
// initialise space for storing p-s-k values
int N = m.nodes();
Sw_vec = TVecDevice(N);
dSw_vec = TVecDevice(N);
rho_vec = TVecDevice(N);
theta_vec = TVecDevice(N);
phi_vec = TVecDevice(N);
dphi_vec = TVecDevice(N);
rho_faces_lim = TVecDevice(m.interior_cvfaces());
krw_faces_lim = TVecDevice(m.interior_cvfaces());
rho_faces = TVecDevice(m.interior_cvfaces());
// spatial weightings
// NOTE(review): host-typed temporaries assigned to device members here --
// presumably assignment performs the host->device copy; verify.
CV_up = TIndexVec(m.local_nodes());
CV_flux = TVec(m.nodes());
CV_flux_comm_tag = node_comm_.vec_add(CV_flux.data());
edge_up = TIndexVecDevice(m.edges());
edge_down = TIndexVecDevice(m.edges());
edge_flux = TVecDevice(m.edges());
M_flux_faces = TVecDevice(m.cvfaces());
qdotn_faces = TVecDevice(m.cvfaces());
// initialise space for derivative coefficients
int NL = m.local_nodes();
ahh_vec = TVecDevice( NL );
// tag dirichlet nodes
is_dirichlet_h_vec_ = TIndexVec(m.local_nodes());
int num_dirichlet = 0;
for( int i=0; i<m.local_nodes(); i++ ){
const mesh::Node& n = m.node(i);
// look for dirichlet tags attached to the node
for( int j=0; j<n.boundaries(); j++ ){
int tag = n.boundary(j);
if( boundary_condition_h(tag).is_dirichlet() ){
is_dirichlet_h_vec_[i] = tag;
num_dirichlet++;
}
}
}
PRINT(fid, is_dirichlet_h_vec_);
// make a list of the dirichlet nodes
TIndexVec dirichlet_nodes(num_dirichlet);
int count=0;
for(int i=0; i<m.local_nodes(); i++)
if(is_dirichlet_h_vec_[i])
dirichlet_nodes[count++] = i;
// copy to device
dirichlet_nodes_ = dirichlet_nodes;
// store the prescribed head values
// currently this only works for time-invariant dirichlet values
TVec h_dirichlet(num_dirichlet);
for(int n=0; n<num_dirichlet; n++){
double t=0.;
int i = dirichlet_nodes[n];
const BoundaryCondition& bc = boundary_condition_h(is_dirichlet_h_vec_[i]);
// fixed dirichlet
if( bc.type()==1 ){
h_dirichlet[n] = bc.value(t);
}
else{
// hydrostatic BCs need the node elevation (y in 2D, z in 3D)
double el = dimension == 2 ? m.node(i).point().y : m.node(i).point().z;
if(bc.type()==4)
h_dirichlet[n] = bc.hydrostatic(t, el);
else{
h_dirichlet[n] = bc.hydrostatic_shore(t, el);
}
}
}
// copy to device
h_dirichlet_ = h_dirichlet;
// initialise vectors used in calculating derived quantities such as saturation
// allocate room for each of the arrays
std::set<int> zones;
for(int i=0; i<m.elements(); i++)
zones.insert(m.element(i).physical_tag());
int num_zones = zones.size();
int indx=0;
for( std::set<int>::iterator it=zones.begin(); it!=zones.end(); it++)
zones_map_[*it] = indx++;
// temp var
std::vector< std::vector<double> > weight_scv_tmp;
std::vector< std::vector<int> > index_scv_tmp;
weight_scv_tmp.resize( num_zones );
index_scv_tmp.resize( num_zones );
std::vector<std::map<int,int> > nodes_idx;
nodes_idx.resize(num_zones);
// compile index and weight information mapping node information to scv information
// weight = fraction of the control volume's volume contributed by each zone
for(int i=0; i<m.nodes(); i++){
const mesh::Volume& cv = m.volume(i);
double cv_vol = cv.vol();
std::vector<double> weights(num_zones);
std::vector<int> counts(num_zones);
for(int j=0; j<cv.scvs(); j++){
int tag = zones_map_[cv.scv(j).element().physical_tag()];
assert(tag<num_zones);
weights[tag] += cv.scv(j).vol() / cv_vol;
counts[tag]++;
}
for(int j=0; j<num_zones; j++){
if(counts[j]){
weight_scv_tmp[j].push_back(weights[j]);
index_scv_tmp[j].push_back(i);
nodes_idx[j][i] = index_scv_tmp[j].size()-1;
}
}
}
weight_scv.resize( num_zones );
index_scv.resize( num_zones );
for(int i=0; i<num_zones; i++){
TVec w_tmp(weight_scv_tmp[i].begin(), weight_scv_tmp[i].end());
TIndexVec i_tmp(index_scv_tmp[i].begin(), index_scv_tmp[i].end());
weight_scv[i] = w_tmp;
index_scv[i] = i_tmp;
}
// OUTPUT
for(int i=0; i<num_zones; i++){
PRINT(fid, weight_scv[i]);
PRINT(fid, index_scv[i]);
}
// allocate room for head values mapped onto SCVs
head_scv.resize( num_zones );
phi_scv.resize( num_zones );
dphi_scv.resize( num_zones );
//Se_scv.resize( num_zones );
Sw_scv.resize( num_zones );
theta_scv.resize( num_zones );
dSw_scv.resize( num_zones );
krw_scv.resize( num_zones );
for(int i=0; i<num_zones; i++){
head_scv[i] = TVecDevice( index_scv[i].size() );
phi_scv[i] = TVecDevice( index_scv[i].size() );
dphi_scv[i] = TVecDevice( index_scv[i].size() );
Sw_scv[i] = TVecDevice( index_scv[i].size() );
theta_scv[i] = TVecDevice( index_scv[i].size() );
dSw_scv[i] = TVecDevice( index_scv[i].size() );
krw_scv[i] = TVecDevice( index_scv[i].size() );
}
// this will hold global (face, edge) pairs of each mapped node value in each zone
std::vector<std::multimap<int, std::pair<int, int> > > faceEdge_map_front;
std::vector<std::multimap<int, std::pair<int, int> > > faceEdge_map_back;
faceEdge_map_front.resize(num_zones);
faceEdge_map_back.resize(num_zones);
for( int i=0; i<m.edges(); i++ ){
const std::vector<int>& edge_cvfaces = m.edge_cvface(i);
// NOTE(review): 'fid' here shadows the std::ofstream fid declared above;
// harmless as written, but a rename would avoid confusion.
int fid = m.edge(i).front().id();
int bid = m.edge(i).back().id();
for(int j=0; j<edge_cvfaces.size(); j++){
int f = edge_cvfaces[j];
int z = zones_map_[m.cvface(f).element().physical_tag()];
int n = nodes_idx[z][fid];
faceEdge_map_front[z].insert(
std::pair<int, std::pair<int, int> >( n, std::pair<int, int>(f, i))
);
n = nodes_idx[z][bid];
faceEdge_map_back[z].insert(
std::pair<int, std::pair<int, int> >( n, std::pair<int, int>(f, i))
);
}
}
// should also reserve memory for the vectors
n_front_.resize(num_zones);
p_front_.resize(num_zones);
q_front_.resize(num_zones);
n_back_.resize(num_zones);
p_back_.resize(num_zones);
q_back_.resize(num_zones);
typedef std::multimap<int, std::pair<int, int> >::iterator idxTypeIt;
for(int z=0; z<num_zones; z++){
std::vector<int> n_front;
std::vector<int> p_front;
std::vector<int> q_front;
std::vector<int> n_back;
std::vector<int> p_back;
std::vector<int> q_back;
int len = head_scv[z].dim();
for(int i=0; i<len; i++){
std::pair<idxTypeIt, idxTypeIt> rng = faceEdge_map_front[z].equal_range(i);
for( idxTypeIt it=rng.first; it!=rng.second; ++it ){
n_front.push_back(i); // local node id
q_front.push_back(it->second.first); // global face index
p_front.push_back(it->second.second); // global edge index
}
rng = faceEdge_map_back[z].equal_range(i);
for( idxTypeIt it=rng.first; it!=rng.second; ++it ){
n_back.push_back(i); // local node id
q_back.push_back(it->second.first); // global face index
p_back.push_back(it->second.second); // global edge index
}
}
n_front_[z] = TIndexVec(n_front.begin(), n_front.end());
p_front_[z] = TIndexVec(p_front.begin(), p_front.end());
q_front_[z] = TIndexVec(q_front.begin(), q_front.end());
n_back_[z] = TIndexVec(n_back.begin(), n_back.end());
p_back_[z] = TIndexVec(p_back.begin(), p_back.end());
q_back_[z] = TIndexVec(q_back.begin(), q_back.end());
}
// OUTPUT
for(int i=0; i<num_zones; i++){
PRINT(fid,n_front_[i]);
PRINT(fid,n_back_[i]);
PRINT(fid,p_front_[i]);
PRINT(fid,p_back_[i]);
PRINT(fid,q_front_[i]);
PRINT(fid,q_back_[i]);
}
// edge weights start at centred averaging (0.5/0.5)
edge_weight_front_ = TVecDevice(m.edges(), 0.5);
edge_weight_back_ = TVecDevice(m.edges(), 0.5);
TIndexVec edge_node_front(m.edges());
TIndexVec edge_node_back(m.edges());
for( int i=0; i<m.edges(); i++){
edge_node_front[i] = m.edge(i).front().id();
edge_node_back[i] = m.edge(i).back().id();
}
// copy onto device
edge_node_front_ = edge_node_front;
edge_node_back_ = edge_node_back;
// OUTPUT
PRINT(fid,edge_node_front_);
PRINT(fid,edge_node_back_);
// initialise the shape functions
initialise_shape_functions(m);
// initialise flux vecs
qsat_faces_.set(m.interior_cvfaces(), m.dim());
norm_faces_.set(m.interior_cvfaces(), m.dim());
TVec X(m.interior_cvfaces());
TVec Y(m.interior_cvfaces());
TVec Z(m.interior_cvfaces());
for( int i=0; i<m.interior_cvfaces(); i++ ){
Point nrm = m.cvface(i).normal();
X[i] = nrm.x;
Y[i] = nrm.y;
if( m.dim()==3 )
Z[i] = nrm.z;
}
norm_faces_.x() = X;
norm_faces_.y() = Y;
if(m.dim()==3)
norm_faces_.z() = Z;
// conductivity components are stored NEGATED so that process_fluxes can
// form the Darcy flux q = K_faces * (grad h + gravity) directly
K_faces_.set(m.interior_cvfaces(), m.dim());
for( int i=0; i<m.interior_cvfaces(); i++ ){
int tag = m.cvface(i).element().physical_tag();
X[i] = -physical_zone(tag).K_xx;
Y[i] = -physical_zone(tag).K_yy;
if( m.dim()==3 )
Z[i] = -physical_zone(tag).K_zz;
}
K_faces_.x() = X;
K_faces_.y() = Y;
if(m.dim()==3)
K_faces_.z() = Z;
}
// Set the density at every interior CV face.  The variable-density call
// (density(h_faces, rho_faces, constants())) is disabled; a constant
// reference density rho_0 is used instead.
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::process_faces_shape( const mesh::Mesh &m )
{
    const double reference_density = constants().rho_0();
    rho_faces(all) = reference_density;
}
// Interpolate nodal densities onto interior CV faces for the flux-limited
// scheme: each edge's density is the front/back-weighted average of its two
// node values, then scattered to all faces attached to that edge via the
// CSR structure (row_ptrs/col_indexes) of flux_lim_matrix.  The GPU path
// delegates to a kernel; the CPU path does the same scatter with OpenMP
// (rho_vec, edge_* weights/nodes are read-only shared by default).
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::process_faces_lim( const mesh::Mesh &m )
{
if(CoordTraits_<CoordDeviceInt>::is_device()){
lin::gpu::collect_edges(
rho_vec.data(), rho_faces_lim.data(), m.edges(),
edge_weight_front_.data(), edge_weight_back_.data(),
edge_node_front_.data(), edge_node_back_.data(),
flux_lim_matrix.row_ptrs().data(), flux_lim_matrix.col_indexes().data() );
}
else{
const int *ia = flux_lim_matrix.row_ptrs().data();
const int *ja = flux_lim_matrix.col_indexes().data();
double *rho_face_ptr = rho_faces_lim.data();
double rho_edge;
int e;
#pragma omp parallel for schedule(static) shared(rho_face_ptr, ja, ia) private(e, rho_edge)
for( e=0; e<m.edges(); e++ ){
rho_edge =
rho_vec.at(edge_node_back_[e])*edge_weight_back_.at(e)
+ rho_vec.at(edge_node_front_[e])*edge_weight_front_.at(e);
for( int j=ia[e]; j<ia[e+1]; j++)
rho_face_ptr[ja[j]] = rho_edge;
}
}
}
// Compute the volumetric flux q.n and mass flux at every CV face.
// Interior faces: q = K_faces * (grad h + e_gravity), where K_faces holds the
// NEGATED conductivities (set in initialise_vectors) and "+1." adds the
// gravity term to the vertical component (y in 2D, z in 3D); the result is
// projected onto the face normal, scaled by the limited relative
// permeability, and multiplied by the limited density for the mass flux.
// Boundary faces: the flux is taken directly from the BC where it prescribes
// one (types 3/6/7 prescribed or seepage, type 8 zero), then scaled by rho_0.
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::process_fluxes( double t, const mesh::Mesh &m )
{
int ifaces=m.interior_cvfaces();
// initialise the flux to zero
qdotn_faces.zero();
// compute the vector quantity q at each internal CV face
qsat_faces_.x().at(all) = grad_h_faces_.x();
qsat_faces_.x() *= K_faces_.x();
qsat_faces_.y().at(all) = grad_h_faces_.y();
if( m.dim()==2 ){
qsat_faces_.y() += 1.;
}else{
qsat_faces_.z().at(all) = grad_h_faces_.z();
qsat_faces_.z() += 1.;
qsat_faces_.z() *= K_faces_.z();
}
qsat_faces_.y() *= K_faces_.y();
// project q onto the face normals: q.n = n_x*q_x + n_y*q_y (+ n_z*q_z)
qdotn_faces.at(0,ifaces-1) = mul(norm_faces_.x(), qsat_faces_.x());
qdotn_faces.at(0,ifaces-1) += mul(norm_faces_.y(), qsat_faces_.y());
if( m.dim()==3 ){
qdotn_faces.at(0,ifaces-1) += mul(norm_faces_.z(), qsat_faces_.z());
}
qdotn_faces.at(0,ifaces-1) *= krw_faces_lim;
M_flux_faces.at(0,ifaces-1) = mul(rho_faces_lim, qdotn_faces);
// loop over boundary faces and find fluid flux where
// explicitly given by BCs
// temp host vector for computing the boundary fluxes
int faces_bnd = m.cvfaces()-m.interior_cvfaces();
TVec qdotn_faces_bnd(faces_bnd);
for( int i=0; i<faces_bnd; i++)
{
const mesh::CVFace& cvf = m.cvface(i+m.interior_cvfaces());
int boundary_tag = cvf.boundary();
const BoundaryCondition& BCh = boundary_condition_h( boundary_tag );
switch( BCh.type() ){
// prescribed flux
case 3:
qdotn_faces_bnd.at(i) = BCh.value(t) * cvf.area();
break;
// prescribed directional flux
case 6:
qdotn_faces_bnd.at(i) = BCh.flux( t, cvf.normal() ) * cvf.area();
break;
// seepage
case 7:
qdotn_faces_bnd.at(i) = BCh.value(t) * cvf.area();
break;
// seepage/hydrostatic shoreline
case 8:
qdotn_faces_bnd.at(i) = 0. * cvf.area();
break;
default:
break;
}
}
qdotn_faces.at(ifaces,m.cvfaces()-1) = qdotn_faces_bnd;
// find mass flux at boundary faces : scale by density
M_flux_faces.at(m.interior_cvfaces(), lin::end) =
constants().rho_0() *
qdotn_faces.at(m.interior_cvfaces(), lin::end);
}
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::process_spatial_weights(const mesh::Mesh& m){
// Compute the front/back edge weights used to interpolate node values onto
// faces, according to the selected spatial weighting scheme: plain upwinding
// or a Van Leer-style flux limiter. Weights always satisfy front+back == 1.
// determine the flux over each edge (sum of the face fluxes attached to it)
flux_lim_matrix.matvec( qdotn_faces.at(0,m.interior_cvfaces()-1), edge_flux );
switch( spatial_weighting ){
case weightAveraging :
// central averaging is not implemented
assert(false);
break;
////////////////////////////////////////////////////////
// the upwinding case is simple
////////////////////////////////////////////////////////
case weightUpwind :
if(CoordTraits_<CoordDeviceInt>::is_device()){
// GPU path: weights computed by the device kernel
lin::gpu::set_weights_upwind(
edge_flux.data(),
edge_weight_front_.data(),
edge_weight_back_.data(),
m.edges()
);
}
else{
// host path: pick the node on the upstream side of each edge
for(int i=0; i<m.edges(); i++){
if( edge_flux.at(i)<0. ){
edge_weight_front_.at(i) = 1.;
edge_weight_back_.at(i) = 0.;
}
else{
edge_weight_front_.at(i) = 0.;
edge_weight_back_.at(i) = 1.;
}
}
}
break;
////////////////////////////////////////////////////////
// the flux limiting case takes a bit more work
////////////////////////////////////////////////////////
case weightVanLeer :
// classify each edge's upstream (edge_up) and downstream (edge_down) node
// from the sign of the edge flux
for(int i=0; i<m.edges(); i++){
if( edge_flux.at(i)>0. ){
edge_up[i] = m.edge(i).back().id();
edge_down[i] = m.edge(i).front().id();
}
else{
edge_up[i] = m.edge(i).front().id();
edge_down[i] = m.edge(i).back().id();
}
}
// find the up node for each CV
for(int i=0; i<m.local_nodes(); i++){
CV_flux.at(i) = 0.;
CV_up[i] = -1;
}
// set the flux into each boundary node to be that from over the boundary
for(int i=m.interior_cvfaces(); i<m.cvfaces(); i++){
int n=m.cvface(i).back().id();
CV_flux.at(n) -= qdotn_faces.at(i);
}
// now find max flux into each CV
for(int i=0; i<m.edges(); i++){
if( edge_node_front_[i]<m.local_nodes() || edge_node_back_[i]<m.local_nodes() ){
int CV = edge_down[i];
if( CV<m.local_nodes() ){
double fl = fabs(edge_flux.at(i));
if( fl>CV_flux[CV] ){
CV_flux[CV] = fl;
CV_up[CV] = edge_up[i];
}
}
}
}
// verify that each CV was assigned an upwind point
// (fall back to the node itself so CV_flux lookups below stay valid)
for(int i=0; i<m.local_nodes(); i++){
if(CV_up[i]==-1){
CV_up[i] = i;
}
}
*node_comm_.mpicomm() << "VarSatPhysicsImpl::process_spatial_weights : communicating 2up fluxes values accross subdomain boundaries" << std::endl;
node_comm_.send(CV_flux_comm_tag);
node_comm_.recv(CV_flux_comm_tag);
// find r and sigma for each edge
for(int i=0; i<m.edges(); i++){
if( edge_node_front_[i]<m.local_nodes() || edge_node_back_[i]<m.local_nodes() ){
double qup = fabs(edge_flux.at(i));
double q2up = CV_flux.at(edge_up[i]);
// NOTE(review): r is computed before the qup==0 guard below; relies on
// IEEE semantics (x/0 -> inf/nan, unused in that branch) rather than UB
double r = q2up / qup;
double sigma;
if( qup==0. )
sigma = 1.;
else if(r>1.e10)
sigma = 2.;
else
// Van Leer limiter: sigma = (r + |r|) / (1 + |r|)
sigma = (r+fabs(r)) / (1.+fabs(r));
// bias the weights toward the upstream node by sigma/2
if( edge_flux.at(i)>0. ){
edge_weight_back_.at(i) = sigma/2.;
edge_weight_front_.at(i) = 1.-sigma/2.;
}
else{
edge_weight_back_.at(i) = 1.-sigma/2.;
edge_weight_front_.at(i) = sigma/2.;
}
}
}
}
}
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::process_volumes_psk( const mesh::Mesh &m )
{
    // Recompute the CV-averaged constitutive state from the current head
    // field h_vec: porosity (phi), saturation (Sw), moisture content
    // (theta = Sw*phi) and their derivatives w.r.t. head, plus the limited
    // relative permeability on faces and the CV-averaged density.
    //
    // Fix: removed the locals beta/rho_0/g (fetched from constants()),
    // T and n, all of which were computed but never read.
    //
    // zero out vectors of CV-averaged derived quantities
    phi_vec.zero();
    dphi_vec.zero();
    Sw_vec.zero();
    dSw_vec.zero();
    theta_vec.zero();
    // for each zone calculate the scv-weighted derived quantities and add
    // them to the appropriate CV-averaged vectors
    for( std::map<int, int>::iterator it=zones_map_.begin();
    it!=zones_map_.end();
    it++)
    {
        int zone = (*it).second;
        int indx = (*it).first;
        const PhysicalZone& props = physical_zone(indx);
        // get head data for this zone type
        head_scv[zone].at(all) = h_vec.at(index_scv[zone]);
        // find porosity and its derivative w.r.t. head
        porosity(head_scv[zone], phi_scv[zone], dphi_scv[zone], props, constants());
        // determine the saturation, rel. permeability and dSw/dh
        saturation( head_scv[zone], props, Sw_scv[zone], dSw_scv[zone], krw_scv[zone] );
        // moisture content theta = Sw * phi
        theta_scv[zone].at(all) = mul(Sw_scv[zone], phi_scv[zone]);
        // accumulate the scv-weighted contributions into the global vectors
        phi_vec.at(index_scv[zone]) += mul(phi_scv[zone], weight_scv[zone]);
        dphi_vec.at(index_scv[zone]) += mul(dphi_scv[zone], weight_scv[zone]);
        Sw_vec.at(index_scv[zone]) += mul(Sw_scv[zone], weight_scv[zone]);
        dSw_vec.at(index_scv[zone]) += mul(dSw_scv[zone], weight_scv[zone]);
        theta_vec.at(index_scv[zone]) += mul(theta_scv[zone], weight_scv[zone]);
        // limited rel. permeability at faces: blend the front/back node
        // values using the spatial (upwind/limited) edge weights
        krw_faces_lim.at(q_front_[zone]) = mul(
        krw_scv[zone].at(n_front_[zone]),
        edge_weight_front_.at(p_front_[zone]) );
        krw_faces_lim.at(q_back_[zone]) += mul(
        krw_scv[zone].at(n_back_[zone]),
        edge_weight_back_.at(p_back_[zone]) );
    }
    // find the CV-averaged density from the head field
    density(h_vec, rho_vec, constants());
}
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::process_derivative_coefficients( const mesh::Mesh &m )
{
// Build the coefficient a_hh that multiplies dh/dt in the mass balance.
// The commented-out loop below is the full compressible form; the active
// code uses the simplified form a_hh = rho_0 * phi * dSw/dh. The locals
// g/beta/factor are only ingredients of the disabled full form and are
// unused by the active expression (kept to document that form).
double rho_0 = constants().rho_0();
double g = constants().g();
double beta = constants().beta();
double factor = rho_0*rho_0*g*beta;
/*
for( int i=0; i<ahh_vec.dim(); i++ )
ahh_vec.at(i) = rho_vec.at(i)*phi_vec.at(i)*dSw_vec.at(i) + rho_vec.at(i)*Sw_vec.at(i)*dphi_vec.at(i) + factor*phi_vec.at(i)*Sw_vec.at(i);
*/
ahh_vec(all) = mul(phi_vec, dSw_vec);
ahh_vec(all) *= rho_0;
}
template <typename CoordHost, typename CoordDevice>
void VarSatPhysicsImpl<CoordHost,CoordDevice>::initialise_shape_functions(const mesh::Mesh& m)
{
// Build the sparse (CSR) interpolation matrices used by the physics:
//  - shape_matrix / shape_gradient_matrix{X,Y,Z}: node values -> value and
//    gradient at interior CV-face centroids (FE shape functions),
//  - flux_lim_matrix: CV-face fluxes -> per-edge flux (area-scaled sum),
//  - cvflux_matrix: CV-face fluxes -> net flux over each node's CV surface.
// Also allocates the node- and face-centred work vectors.
// matrices with weights for computing shape functions
TIndexVec ia, ja;
TVec shape_val, shape_dx, shape_dy, shape_dz;
// Allocate row begin array
int ia_length = m.interior_cvfaces() + 1;
ia = TIndexVec(ia_length);
// Fill row begin array: one row per interior CV face, one entry per node
// of the element containing that face
ia[0] = 0;
for (int i = 0; i < m.interior_cvfaces(); ++i) {
ia[i+1] = ia[i] + m.cvface(i).element().nodes();
}
// Allocate matrix arrays
int ja_length = ia[ia_length-1];
ja = TIndexVec(ja_length);
shape_val = TVec(ja_length);
shape_dx = TVec(ja_length);
shape_dy = TVec(ja_length);
shape_dz = TVec(ja_length);
// Allocate node value arrays
h_vec = TVec(m.nodes());
M_vec_ = TVec(m.nodes());
hp_vec_ = TVec(m.nodes());
Mp_vec_ = TVec(m.nodes());
// Allocate CVFace centroid arrays
h_faces = TVec(m.interior_cvfaces());
grad_h_faces_.set(m.interior_cvfaces(), m.dim());
// Fill other arrays;
for (int i = 0; i < m.elements(); ++i) {
const mesh::Element& e = m.element(i);
// Sort the node ids, to get the index vector
// (pairs of (global node id, local node index), sorted by id so the
// CSR column indices come out in ascending order)
std::vector< std::pair<int, int> > index_vector(e.nodes());
for (int k = 0; k < e.nodes(); ++k) {
index_vector[k] = std::make_pair(e.node(k).id(), k);
}
std::sort(index_vector.begin(), index_vector.end());
shape::Shape my_shape(e);
for (int j = 0; j < e.edges(); ++j) {
int cvf_id = e.cvface(j).id();
// Record ja indices
const mesh::CVFace& cvf = e.cvface(j);
for (int k = 0, p = ia[cvf_id]; p < ia[cvf_id+1]; ++k, ++p) {
ja[p] = index_vector[k].first;
}
// Get shape functions and gradients
std::vector<double> shape_functions = my_shape.shape_functions(j);
std::vector<mesh::Point> shape_gradients = my_shape.shape_gradients(j);
// Now load them into the matrices
// (index_vector[k].second maps sorted position back to local node)
for (int k = 0, p = ia[cvf_id]; p < ia[cvf_id+1]; ++k, ++p) {
shape_val[p] = shape_functions[index_vector[k].second];
shape_dx[p] = shape_gradients[index_vector[k].second].x;
shape_dy[p] = shape_gradients[index_vector[k].second].y;
shape_dz[p] = shape_gradients[index_vector[k].second].z;
}
}
}
shape_matrix = InterpolationMatrix(ia, ja, shape_val);
shape_gradient_matrixX = InterpolationMatrix(ia, ja, shape_dx);
shape_gradient_matrixY = InterpolationMatrix(ia, ja, shape_dy);
if (dimension == 3)
shape_gradient_matrixZ = InterpolationMatrix(ia, ja, shape_dz);
//////////////////////////////////////////////////////////
// MATRIX FOR FLUX LIMITTING
// num_edges X num_cvfaces
// sums the fluxes at each face associated with an edge
// which gives the total flux between the control volumes
// that share the edge
//////////////////////////////////////////////////////////
TIndexVec ia_fl, ja_fl;
TVec weights_fl;
// allocate space for row begin indices
ia_length = m.edges()+1;
ia_fl = TIndexVec(ia_length);
ia_fl[0] = 0;
for (int i = 0; i < m.edges(); ++i) {
ia_fl[i+1] = ia_fl[i] + m.edge_cvface(i).size();
}
// allocate space for column indices
ja_length = ia_fl[ia_length-1];
ja_fl = TIndexVec(ja_length);
// allocate space for weights
//weights_fl.resize(ja_length);
weights_fl = TVec(ja_length, 0.);
for(int i=0; i<m.edges(); i++){
const std::vector<int>& faces = m.edge_cvface(i);
// determine the total surface area of the faces attached to edge i
double total_area = 0.;
for(int j=0; j<faces.size(); j++)
total_area += m.cvface(faces[j]).area();
// now determine the scaled weights
// (every face of the edge gets the same weight 1/total_area; the
// area-proportional alternative is left commented out)
int pos = ia_fl[i];
for(int j=0; j<faces.size(); j++){
int face = faces[j];
//weights_fl[pos] = m.cvface(face).area()/total_area;
weights_fl.at(pos) = 1./total_area;
ja_fl[pos] = face;
pos++;
}
}
flux_lim_matrix = InterpolationMatrix(ia_fl, ja_fl, weights_fl);
//////////////////////////////////////////////////////////
// MATRIX FOR CALCULATING FLUX OVER A CV SURFACE
// num_nodes X num_cvfaces
// sums the flux over each CV face that defines the surface
// of the control volume around each node
//////////////////////////////////////////////////////////
TIndexVec ia_cl, ja_cl;
TVec weights_cl;
int N=m.local_nodes();
ia_length = N+1;
ia_cl = TIndexVec(ia_length);
ia_cl[0] = 0;
TIndexVec face_counts(N,0);
std::vector<int> col_indexes;
std::vector<double> weights_tmp;
for (int i = 0; i < N; ++i) {
const mesh::Volume& v = m.volume(i);
// weights are +-1/volume so the matvec yields a volume-averaged net flux
double w = 1./v.vol();
std::vector<int> node_faces;
// make a list of the cv faces that form the
// surface of the control volume around node i
for(int j=0; j<v.scvs(); j++){
const mesh::SCV& s = v.scv(j);
for(int k=0; k<s.cvfaces(); k++)
node_faces.push_back(s.cvface(k).id());
}
// sort the faces in ascending order
std::sort(node_faces.begin(),node_faces.end());
// add them to the column index
for(int j=0; j<node_faces.size(); j++)
col_indexes.push_back(node_faces[j]);
// update the row pointer
ia_cl[i+1] = ia_cl[i]+node_faces.size();
// choose the weight for each face
// (sign depends on whether node i is the back node: face normals point
// from back to front, so the flux is negated for the back node)
for(int j=0; j<node_faces.size(); j++){
// note that the order of evaluation is very important here
// because if a cv face lies on the boundary it
// has no front node
if(i==m.cvface(node_faces[j]).back().id())
weights_tmp.push_back(-w);
else
weights_tmp.push_back(w);
}
}
// assign the column index and weights
ja_cl.assign(col_indexes.begin(), col_indexes.end());
weights_cl.assign(weights_tmp.begin(), weights_tmp.end());
cvflux_matrix = InterpolationMatrix(ia_cl, ja_cl, weights_cl);
//cvflux_matrix.write_to_file(std::string("../../../../cvflux.m"), util::file_format_matlab);
}
// Return the physical zone properties for the given zone index.
// The reference remains owned by this object.
template <typename CoordHost, typename CoordDevice>
const PhysicalZone& VarSatPhysicsImpl<CoordHost,CoordDevice>::physical_zone( int zone ) const
{
    // Fix: the original wrapped the assert in `if(!(cond)) assert(cond);`,
    // which is redundant -- assert already tests the condition, and both the
    // `if` and the assert vanish under NDEBUG. The cast also silences the
    // signed/unsigned comparison against size().
    assert( zone>=0 && zone<static_cast<int>(physical_zones_.size()) );
    return physical_zones_[zone];
}
// Report how many physical property zones are registered.
template <typename CoordHost, typename CoordDevice>
int VarSatPhysicsImpl<CoordHost,CoordDevice>::physical_zones( void ) const
{
    const int zone_count = physical_zones_.size();
    return zone_count;
}
// Look up the head boundary condition registered for a boundary tag.
// An unknown tag is a programming error (asserted in debug builds).
template <typename CoordHost, typename CoordDevice>
const BoundaryCondition& VarSatPhysicsImpl<CoordHost,CoordDevice>::boundary_condition_h( int tag ) const{
    std::map<int,BoundaryCondition>::const_iterator pos = boundary_conditions_h_.find(tag);
    assert( pos!=boundary_conditions_h_.end());
    return pos->second;
}
} // end namespace fvmpor
#endif
|
GB_unop__identity_int16_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_int8
// op(A') function: GB_unop_tran__identity_int16_int8
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with an int8 -> int16
// widening cast, either over a dense array (Ab == NULL) or a bitmap.
GrB_Info GB_unop_apply__identity_int16_int8
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: apply op to every position
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Cx [p] = identity ((int16_t) Ax [p])
            Cx [p] = (int16_t) Ax [p] ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            Cx [p] = (int16_t) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast int8 -> int16, and apply identity.
// The actual kernel body is the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__identity_int16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_uint64
// op(A') function: GB_tran__minv_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the MINV (integer multiplicative inverse)
// operator to each entry, casting uint64 -> int8 first. The per-entry work
// is done by the GB_CAST_OP macro defined above; GB_IMINV_SIGNED is defined
// in GB.h (semantics of the 8-bit signed integer inverse live there).
GrB_Info GB_unop__minv_int8_uint64
(
int8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast uint64 -> int8, and apply MINV.
// The kernel body is the shared template GB_unaryop_transpose.c, expanded
// with the GB_* macros defined at the top of this file.
GrB_Info GB_tran__minv_int8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include <dynmat.h>
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double *q,
const double *r,
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
const double *charge_sum,
const int i,
const int j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double *q,
const double *r,
const int *multi,
const double mass_sqrt,
const int *p2s_map,
const double *charge_sum,
const int i,
const int j,
const int k);
static double get_dielectric_part(const double q[3],
const double *dielectric);
/* Build the dynamical matrix D(q) for the primitive cell.
 * dynamical_matrix is a (3*num_patom) x (3*num_patom) complex matrix stored
 * row-major with interleaved (real, imag) doubles, so the row stride is
 * num_patom*6 doubles and element (i,j) starts at i*num_patom*6 + j*2.
 * Each 3x3 atom-pair block is filled by get_dynmat_ij, in parallel over the
 * flattened pair index when with_openmp is nonzero. Always returns 0. */
int get_dynamical_matrix_at_q(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double *q,
const double *r,
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
const double *charge_sum,
const int with_openmp)
{
int i, j, ij, adrs, adrsT;
if (with_openmp) {
/* one iteration per primitive-atom pair; blocks are disjoint so no
 * synchronisation is needed */
#pragma omp parallel for
for (ij = 0; ij < num_patom * num_patom ; ij++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
r,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
ij / num_patom, /* i */
ij % num_patom); /* j */
}
} else {
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
r,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
i,
j);
}
}
}
/* Symmetrize to be a Hermitian matrix */
/* D(i,j) <- (D(i,j) + conj(D(j,i))) / 2, then mirror to D(j,i) */
for (i = 0; i < num_patom * 3; i++) {
for (j = i; j < num_patom * 3; j++) {
adrs = i * num_patom * 6 + j * 2;
adrsT = j * num_patom * 6 + i * 2;
dynamical_matrix[adrs] += dynamical_matrix[adrsT];
dynamical_matrix[adrs] /= 2;
dynamical_matrix[adrs + 1] -= dynamical_matrix[adrsT+ 1];
dynamical_matrix[adrs + 1] /= 2;
dynamical_matrix[adrsT] = dynamical_matrix[adrs];
dynamical_matrix[adrsT + 1] = -dynamical_matrix[adrs + 1];
}
}
return 0;
}
/* Accumulate the reciprocal-space dipole-dipole (non-analytical) correction
 * into dd, laid out as [natom, 3, natom, 3, (real, imag)]: element
 * (i,k | j,l) starts at i*natom*18 + k*natom*6 + j*6 + l*2.
 * Two sums over the K-vector list: the first adds terms at K, the second
 * subtracts terms at G = K - q. K vectors shorter than `tolerance` are
 * skipped (or replaced by q_direction in the first sum, if provided). */
void get_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real, imag)] */
const double *K_list, /* [num_kvec, 3] */
const int num_K,
const int num_patom,
const double *q_vector,
const double *q_direction,
const double *born,
const double *dielectric,
const double factor, /* 4pi/V*unit-conv */
const double *pos, /* [natom, 3] */
const double tolerance)
{
int i, j, k, l, g, adrs;
double q_K[3], q_G[3];
double norm, cos_phase, sin_phase, phase, z;
double *charge_sum;
charge_sum = NULL;
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] = 0;
}
/* NOTE(review): malloc result is not checked; a failure here would
 * dereference NULL inside get_charge_sum */
charge_sum = (double*) malloc(sizeof(double) * num_patom * num_patom * 9);
/* first sum: contributions at each K vector */
for (g = 0; g < num_K; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
norm += K_list[g * 3 + i] * K_list[g * 3 + i];
}
if (sqrt(norm) < tolerance) {
/* |K| ~ 0: use the supplied approach direction if any, else skip */
if (!q_direction) {
continue;
} else {
for (i = 0; i < 3; i++) {q_K[i] = q_direction[i];}
}
} else {
for (i = 0; i < 3; i++) {q_K[i] = K_list[g * 3 + i];}
}
/* charge_sum[i,j,k,l] = (q.Z_i)_k (q.Z_j)_l * factor / (q.eps.q) */
get_charge_sum(charge_sum,
num_patom,
factor / get_dielectric_part(q_K, dielectric),
q_K,
born);
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
/* structure-factor phase 2*pi*(r_i - r_j).K */
phase = 0;
for (k = 0; k < 3; k++) {
phase += (pos[i * 3 + k] - pos[j * 3 + k]) * K_list[g * 3 + k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 18 + k * num_patom * 6 + j * 6 + l * 2;
z = charge_sum[i * num_patom * 9 + j * 9 + k * 3 + l];
dd[adrs] += z * cos_phase;
dd[adrs + 1] += z * sin_phase;
}
}
}
}
}
/* second sum: subtract contributions at G = K - q */
for (g = 0; g < num_K; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
q_G[i] = K_list[g * 3 + i] - q_vector[i];
norm += q_G[i] * q_G[i];
}
if (sqrt(norm) < tolerance) {
continue;
}
get_charge_sum(charge_sum,
num_patom,
factor / get_dielectric_part(q_G, dielectric),
q_G,
born);
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
phase = 0;
for (k = 0; k < 3; k++) {
phase += (pos[i * 3 + k] - pos[j * 3 + k]) * q_G[k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
/* i is correct about the third index, not j */
adrs = i * num_patom * 18 + k * num_patom * 6 + i * 6 + l * 2;
z = charge_sum[i * num_patom * 9 + j * 9 + k * 3 + l];
dd[adrs] -= z * cos_phase;
dd[adrs + 1] -= z * sin_phase;
}
}
}
}
}
free(charge_sum);
charge_sum = NULL;
}
/* Fill charge_sum[i, j, a, b] (layout i*9*num_patom + j*9 + a*3 + b) with
 * (q . Z_i)_a * (q . Z_j)_b * factor, where Z_i is atom i's 3x3 Born
 * effective charge tensor stored row-major in born[i*9 ...]. */
void get_charge_sum(double *charge_sum,
                    const int num_patom,
                    const double factor, /* 4pi/V*unit-conv and denominator */
                    const double q_vector[3],
                    const double *born)
{
  int i, j, a, b, cart;
  double (*zq)[3];   /* zq[i] = q . Z_i */

  /* calloc zero-initialises, replacing the explicit clearing loops */
  zq = (double (*)[3]) calloc(num_patom, sizeof(double[3]));
  /* project each Born tensor onto q: zq[i][j] = sum_k q[k] * Z_i[k][j] */
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < 3; j++) {
      for (cart = 0; cart < 3; cart++) {
        zq[i][j] += q_vector[cart] * born[i * 9 + cart * 3 + j];
      }
    }
  }
  /* scaled outer product of the projected charges */
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < num_patom; j++) {
      for (a = 0; a < 3; a++) {
        for (b = 0; b < 3; b++) {
          charge_sum[i * 9 * num_patom + j * 9 + a * 3 + b] =
            zq[i][a] * zq[j][b] * factor;
        }
      }
    }
  }
  free(zq);
}
/* Compute the 3x3 complex block of the dynamical matrix for primitive-atom
 * pair (i, j) and store it at rows 3i..3i+2, cols 3j..3j+2 of
 * dynamical_matrix (interleaved re/im, row stride num_patom*6 doubles).
 * Sums the force constants over all supercell atoms k that fold onto
 * primitive atom j (s2p_map[k] == p2s_map[j]), with mass normalisation. */
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double *q,
const double *r,
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
const double *charge_sum,
const int i,
const int j)
{
int k, l, adrs;
double mass_sqrt;
double dm_real[3][3], dm_imag[3][3];
mass_sqrt = sqrt(mass[i] * mass[j]);
/* accumulate into local 3x3 real/imag blocks, then write out once */
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
dm_real[k][l] = 0;
dm_imag[k][l] = 0;
}
}
for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */
if (s2p_map[k] != p2s_map[j]) {
continue;
}
get_dm(dm_real,
dm_imag,
num_patom,
num_satom,
fc,
q,
r,
multi,
mass_sqrt,
p2s_map,
charge_sum,
i,
j,
k);
}
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = (i * 3 + k) * num_patom * 6 + j * 6 + l * 2;
dynamical_matrix[adrs] = dm_real[k][l];
dynamical_matrix[adrs + 1] = dm_imag[k][l];
}
}
}
/* Add the contribution of supercell atom k to the (i, j) 3x3 block.
 * First averages the Bloch phase factor exp(2*pi*i q.r) over the
 * multi[k*num_patom+i] equivalent lattice images of the pair; r stores
 * 81 doubles per (k, i) pair, i.e. up to 27 image vectors of 3 components.
 * Then accumulates fc/mass_sqrt (plus the charge_sum NAC term when
 * charge_sum is non-NULL) times (cos, sin) into dm_real/dm_imag. */
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double *q,
const double *r,
const int *multi,
const double mass_sqrt,
const int *p2s_map,
const double *charge_sum,
const int i,
const int j,
const int k)
{
int l, m;
double phase, cos_phase, sin_phase, fc_elem;
cos_phase = 0;
sin_phase = 0;
/* phase factor averaged over equivalent images */
for (l = 0; l < multi[k * num_patom + i]; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase += q[m] * r[k * num_patom * 81 + i * 81 + l * 3 + m];
}
cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i];
sin_phase += sin(phase * 2 * PI) / multi[k * num_patom + i];
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
if (charge_sum) {
/* non-analytical correction added to the bare force constant */
fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
charge_sum[i * num_patom * 9 +
j * 9 + l * 3 + m]) / mass_sqrt;
} else {
fc_elem = fc[p2s_map[i] * num_satom * 9 +
k * 9 + l * 3 + m] / mass_sqrt;
}
dm_real[l][m] += fc_elem * cos_phase;
dm_imag[l][m] += fc_elem * sin_phase;
}
}
}
/* Evaluate the scalar q^T . epsilon . q for a 3-vector q and a row-major
 * 3x3 dielectric tensor. */
static double get_dielectric_part(const double q[3],
                                  const double *dielectric)
{
  int row, col;
  double eps_q[3];   /* epsilon . q */
  double result;

  for (row = 0; row < 3; row++) {
    eps_q[row] = 0;
    for (col = 0; col < 3; col++) {
      eps_q[row] += dielectric[row * 3 + col] * q[col];
    }
  }
  result = 0;
  for (row = 0; row < 3; row++) {
    result += q[row] * eps_q[row];
  }
  return result;
}
|
compiler_cgen.c | /* Generated by Nim Compiler v0.15.0 */
/* (c) 2016 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 64
#include "nimbase.h"
#include <string.h>
typedef struct Tcgen529027 Tcgen529027;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct Ropeobj178006 Ropeobj178006;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct Cell47305 Cell47305;
typedef struct Cellseq47321 Cellseq47321;
typedef struct Gcheap49818 Gcheap49818;
typedef struct Gcstack49816 Gcstack49816;
typedef struct Memregion29486 Memregion29486;
typedef struct Smallchunk29440 Smallchunk29440;
typedef struct Llchunk29480 Llchunk29480;
typedef struct Bigchunk29442 Bigchunk29442;
typedef struct Intset29414 Intset29414;
typedef struct Trunk29410 Trunk29410;
typedef struct Avlnode29484 Avlnode29484;
typedef struct Gcstat49814 Gcstat49814;
typedef struct Cellset47317 Cellset47317;
typedef struct Pagedesc47313 Pagedesc47313;
typedef struct Ttypeseq292836 Ttypeseq292836;
typedef struct Ttype292840 Ttype292840;
typedef struct Intset268030 Intset268030;
typedef struct Trunk268026 Trunk268026;
typedef struct Trunkseq268028 Trunkseq268028;
typedef struct Tpasscontext341002 Tpasscontext341002;
typedef struct Tsym292834 Tsym292834;
typedef struct Tidobj199004 Tidobj199004;
typedef struct TNimObject TNimObject;
typedef struct TY292929 TY292929;
typedef struct Tstrtable292806 Tstrtable292806;
typedef struct Tsymseq292804 Tsymseq292804;
typedef struct Tident199010 Tident199010;
typedef struct Tlineinfo191336 Tlineinfo191336;
typedef struct Tnode292802 Tnode292802;
typedef struct Tloc292816 Tloc292816;
typedef struct Tlib292820 Tlib292820;
typedef struct TY529153 TY529153;
typedef struct TY203018 TY203018;
typedef struct Tidtable292850 Tidtable292850;
typedef struct Tidpairseq292848 Tidpairseq292848;
typedef struct Tlinkedlist147013 Tlinkedlist147013;
typedef struct Tlistentry147007 Tlistentry147007;
typedef struct Tcproc529021 Tcproc529021;
typedef struct Tnodetable292862 Tnodetable292862;
typedef struct Tnodepairseq292860 Tnodepairseq292860;
typedef struct Debuginfo203009 Debuginfo203009;
typedef struct TY203021 TY203021;
typedef struct TY203023 TY203023;
typedef struct Tnodeseq292796 Tnodeseq292796;
typedef struct TY191350 TY191350;
typedef struct TY529095 TY529095;
typedef struct Trodreader332021 Trodreader332021;
typedef struct TY292960 TY292960;
typedef struct TY203017 TY203017;
typedef struct Enumdesc203007 Enumdesc203007;
typedef struct Tinfocc273008 Tinfocc273008;
typedef struct Tblock529019 Tblock529019;
typedef struct Ttraversalclosure537019 Ttraversalclosure537019;
typedef struct TY135002 TY135002;
typedef struct Tbitset339004 Tbitset339004;
typedef struct TY191612 TY191612;
typedef struct Tfileinfo191334 Tfileinfo191334;
typedef struct Tinfoos176035 Tinfoos176035;
typedef struct Tinfocpu176476 Tinfocpu176476;
typedef struct Tstrentry147009 Tstrentry147009;
typedef struct TY128506 TY128506;
typedef struct Basechunk29438 Basechunk29438;
typedef struct Freecell29430 Freecell29430;
typedef struct Tinstantiation292824 Tinstantiation292824;
typedef struct Tidpair292846 Tidpair292846;
typedef struct Tnodepair292858 Tnodepair292858;
typedef struct Filenamemapping203005 Filenamemapping203005;
typedef struct TY332033 TY332033;
typedef struct Tindex332019 Tindex332019;
typedef struct Tiitable299142 Tiitable299142;
typedef struct Tiipairseq299140 Tiipairseq299140;
typedef struct Table332054 Table332054;
typedef struct Keyvaluepairseq332057 Keyvaluepairseq332057;
typedef struct Memfile330202 Memfile330202;
typedef struct TY292961 TY292961;
typedef struct Tiipair299138 Tiipair299138;
typedef struct Keyvaluepair332060 Keyvaluepair332060;
typedef NU8 Tnimkind3403;
typedef NU8 Tnimtypeflag3409Set;
typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0);
typedef N_NIMCALL_PTR(void*, TY3494) (void* p0);
/* Nim run-time type descriptor, emitted by the Nim compiler (generated
 * code -- mirrors the RTL's TNimType layout; do not hand-edit). */
struct TNimType {
NI size;
Tnimkind3403 kind;
Tnimtypeflag3409Set flags;
TNimType* base; /* presumably the parent/element type -- verify against RTL */
TNimNode* node; /* field layout tree (see TNimNode) */
void* finalizer;
TY3489 marker; /* proc taking (void*, NI) -- see typedef TY3489 above */
TY3494 deepcopy; /* proc void* -> void* -- see typedef TY3494 above */
};
typedef NU8 Tnimnodekind3405;
struct TNimNode {
Tnimnodekind3405 kind;
NI offset;
TNimType* typ;
NCSTRING name;
NI len;
TNimNode** sons;
};
typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void);
/* Header shared by all Nim seqs: current length and allocated capacity. */
struct TGenericSeq {
NI len;
NI reserved;
};
/* Nim string: a seq header followed by the character payload. */
struct NimStringDesc {
TGenericSeq Sup;
NIM_CHAR data[SEQ_DECL_SIZE];
};
/* ---- Nim runtime GC / allocator bookkeeping structures ---- */
/* Per-cell GC header: reference count plus the cell's type descriptor
 * (see usrtocell_/incref_/decref_ declared below). */
struct Cell47305 {
NI refcount;
TNimType* typ;
};
/* Growable array of cell pointers (zero-count table, decrement stack). */
struct Cellseq47321 {
NI len;
NI cap;
Cell47305** d;
};
typedef Smallchunk29440* TY29501[512];
typedef Trunk29410* Trunkbuckets29412[256];
/* Bucketed integer set used to record chunk start addresses. */
struct Intset29414 {
Trunkbuckets29412 data;
};
/* Allocator region: free lists of small/big chunks, usage counters and
 * an AVL index (root/deleted/last) over allocated ranges. */
struct Memregion29486 {
NI minlargeobj;
NI maxlargeobj;
TY29501 freesmallchunks;
Llchunk29480* llmem;
NI currmem;
NI maxmem;
NI freemem;
NI lastsize;
Bigchunk29442* freechunkslist;
Intset29414 chunkstarts;
Avlnode29484* root;
Avlnode29484* deleted;
Avlnode29484* last;
Avlnode29484* freeavlnodes;
NIM_BOOL locked;
};
/* GC statistics counters. */
struct Gcstat49814 {
NI stackscans;
NI cyclecollections;
NI maxthreshold;
NI maxstacksize;
NI maxstackcells;
NI cycletablesize;
NI64 maxpause;
};
/* Set of cells marked during cycle collection. */
struct Cellset47317 {
NI counter;
NI max;
Pagedesc47313* head;
Pagedesc47313** data;
};
/* Top-level GC heap state: zero-count table (zct), decrement stack,
 * the allocator region and collection statistics. */
struct Gcheap49818 {
Gcstack49816* stack;
void* stackbottom;
NI cyclethreshold;
Cellseq47321 zct;
Cellseq47321 decstack;
Cellseq47321 tempstack;
NI recgclock;
Memregion29486 region;
Gcstat49814 stat;
Cellset47317 marked;
Cellseq47321 additionalroots;
};
/* Sparse integer set used by the compiler (see initintset_,
 * containsorincl_, excl_ declared below). */
struct Intset268030 {
NI counter;
NI max;
Trunk268026* head;
Trunkseq268028* data;
};
/* Root of Nim's object hierarchy: carries the run-time type pointer. */
struct TNimObject {
TNimType* m_type;
};
/* Object with a unique numeric id; base of symbols, types and idents. */
struct Tidobj199004 {
TNimObject Sup;
NI id;
};
typedef NU8 Tsymkind292435;
/* Symbol table: element count plus the backing seq of symbols. */
struct Tstrtable292806 {
NI counter;
Tsymseq292804* data;
};
typedef NU16 Tmagic292524;
/* Source position: 16-bit line/column plus an index into the global
 * file table (compare tofullpath_/tofilename_ below, keyed by NI32). */
struct Tlineinfo191336 {
NI16 line;
NI16 col;
NI32 fileindex;
};
typedef NU32 Tsymflag292184Set;
typedef NU32 Toption169009Set;
typedef NU8 Tlockind292808;
typedef NU8 Tstorageloc292812;
typedef NU16 Tlocflag292810Set;
/* Code-generator "location": kind `k`, storage class `s`, flags, the
 * Nim type `t` and the rope `r` holding the generated C expression. */
struct Tloc292816 {
Tlockind292808 k;
Tstorageloc292812 s;
Tlocflag292810Set flags;
Ttype292840* t;
Ropeobj178006* r;
};
/* Compiler symbol (proc, var, module, field, ...).  `kindU` is a
 * kind-discriminated union with per-kind payloads. */
struct Tsym292834 {
Tidobj199004 Sup;
Tsymkind292435 kind;
union{
struct {Ttypeseq292836* typeinstcache;
} S1;
struct {TY292929* procinstcache;
Tsym292834* gcunsafetyreason;
} S2;
struct {TY292929* usedgenerics;
Tstrtable292806 tab;
} S3;
struct {Tsym292834* guard;
NI bitsize;
} S4;
} kindU;
Tmagic292524 magic;
Ttype292840* typ;
Tident199010* name;
Tlineinfo191336 info;
Tsym292834* owner;
Tsymflag292184Set flags;
Tnode292802* ast;
Toption169009Set options;
NI position;
NI offset;
Tloc292816 loc;           /* generated C location for this symbol */
Tlib292820* annex;        /* dynamic library this symbol comes from */
Tnode292802* constraint;
};
/* Anonymous (string, int) tuple lowered to positional fields. */
struct TY203018 {
NimStringDesc* Field0;
NI Field1;
};
/* Base for per-module compiler pass contexts; `fromcache` marks modules
 * restored from the rod-file cache. */
struct Tpasscontext341002 {
TNimObject Sup;
NIM_BOOL fromcache;
};
typedef Ropeobj178006* Tcfilesections529009[18];
typedef NU8 Codegenflag529025Set;
/* Hash table keyed by Tidobj ids (see idtableget_/idtableput_ below). */
struct Tidtable292850 {
NI counter;
Tidpairseq292848* data;
};
/* Intrusive doubly linked list of Tlistentry147007 nodes. */
struct Tlinkedlist147013 {
Tlistentry147007* head;
Tlistentry147007* tail;
NI counter;
};
/* Hash table keyed by AST nodes (see nodetabletestorset_ below). */
struct Tnodetable292862 {
NI counter;
Tnodepairseq292860* data;
};
typedef Ropeobj178006* TY529136[10];
/* Per-module C code generator state: the 18 output file sections `s`,
 * caches of already-emitted types/protos/typeinfo, and the module's
 * init/preinit/postinit procs. */
struct Tcgen529027 {
Tpasscontext341002 Sup;
Tcfilesections529009 s;
Codegenflag529025Set flags;
Tsym292834* module;
NimStringDesc* filename;
NimStringDesc* cfilename;
Ropeobj178006* tmpbase;
Tidtable292850 typecache;
Tidtable292850 forwtypecache;
Intset268030 declaredthings;
Intset268030 declaredprotos;
Tlinkedlist147013 headerfiles;
Intset268030 typeinfomarker;
Tcproc529021* initproc;
Tcproc529021* postinitproc;
Tcproc529021* preinitproc;
Ttypeseq292836* typestack;
Tnodetable292862 datacache;
Tsymseq292804* forwardedprocs;
NI typenodes;
NI nimtypes;
Ropeobj178006* typenodesname;
Ropeobj178006* nimtypesname;
NI labels;                  /* counter for unique temp/label names */
TY529136 extensionloaders;
Ropeobj178006* injectstmt;
};
/* Debug-info registry of files and enums (see register_203121_ and
 * hasenum_203230_ declared below). */
struct Debuginfo203009 {
NI version;
TY203021* files;
TY203023* enums;
NIM_BOOL conflicts;
};
/* Interned identifier: string `s`, hash `h`, bucket chain via `next`. */
struct Tident199010 {
Tidobj199004 Sup;
NimStringDesc* s;
Tident199010* next;
NI h;
};
/* Per-procedure C code generator state: label counter, nested block and
 * try-statement stacks, GC frame info and the owning module. */
struct Tcproc529021 {
Tsym292834* prc;                  /* the proc being generated */
NIM_BOOL beforeretneeded;
NIM_BOOL threadvaraccessed;
Tlineinfo191336 lastlineinfo;
Tnodeseq292796* nestedtrystmts;
NI inexceptblock;
TY191350* finallysafepoints;
NI labels;                        /* counter for unique labels/temps */
TY529095* blocks;                 /* stack of open Tblock529019 scopes */
NI breakidx;
Toption169009Set options;
NI maxframelen;
Tcgen529027* module;              /* owning module generator */
NI withinloop;
NI splitdecls;
NI gcframeid;
Ropeobj178006* gcframetype;
};
/* More Nim enum/set types lowered to fixed-width integers. */
typedef NU8 Tsymflag292184;
typedef NU8 Codegenflag529025;
typedef NU8 Toption169009;
typedef NU64 Tglobaloption169013Set;
typedef NU8 Tglobaloption169013;
typedef NU8 Tcommands169076;
typedef NU16 Tnodeflag292427Set;
typedef NU8 Tnodekind292020;
/* AST node: variant selected by `kind` — integer/float/string literal,
 * symbol, identifier, or a seq of child nodes (`sons`). */
struct Tnode292802 {
Ttype292840* typ;
Tlineinfo191336 info;
Tnodeflag292427Set flags;
Tnodekind292020 kind;
union{
struct {NI64 intval;
} S1;
struct {NF floatval;
} S2;
struct {NimStringDesc* strval;
} S3;
struct {Tsym292834* sym;
} S4;
struct {Tident199010* ident;
} S5;
struct {Tnodeseq292796* sons;
} S6;
} kindU;
NimStringDesc* comment;
};
typedef Ropeobj178006* TY533289[1];
typedef NU8 Tlocflag292810;
/* Node base of the intrusive list used by Tlinkedlist147013. */
struct Tlistentry147007 {
TNimObject Sup;
Tlistentry147007* prev;
Tlistentry147007* next;
};
typedef NU8 Tlibkind292818;
/* External (dynamic) library a symbol is imported from; see
 * loaddynamiclib_/symindynamiclib_ declared below. */
struct Tlib292820 {
Tlistentry147007 Sup;
Tlibkind292818 kind;
NIM_BOOL generated;
NIM_BOOL isoverriden;
Ropeobj178006* name;
Tnode292802* path;
};
typedef NU8 Tcfilesection529005;
typedef NU8 Ttypekind292244;
typedef NU8 Tcallingconvention292002;
typedef NU32 Ttypeflag292431Set;
/* Compiler representation of a Nim type: kind, calling convention,
 * flags, component types in `sons`, attached procs (destructor/deepcopy/
 * assignment), computed size/alignment, and its generated C location. */
struct Ttype292840 {
Tidobj199004 Sup;
Ttypekind292244 kind;
Tcallingconvention292002 callconv;
Ttypeflag292431Set flags;
Ttypeseq292836* sons;
Tnode292802* n;
Tsym292834* owner;
Tsym292834* sym;
Tsym292834* destructor;
Tsym292834* deepcopy;
Tsym292834* assignment;
TY292960* methods;
NI64 size;
NI16 align;
NI16 locklevel;
Tloc292816 loc;
};
typedef Ropeobj178006* TY532811[2];
typedef NU8 Tctypekind529007;
typedef NU64 Ttypekind292244Set;
typedef NU8 Ttypeflag292431;
typedef NimStringDesc* TY533943[14];
typedef NU8 Tprefereddesc320011;
typedef Ropeobj178006* TY178507[1];
/* Enum description recorded in the debug info (see registerenum_). */
struct Enumdesc203007 {
NI size;
NU32 owner;
NI id;
NimStringDesc* name;
TY203017* values;
};
typedef Ropeobj178006* TY535235[4];
typedef NimStringDesc* TY292016[10];
typedef Ropeobj178006* TY535238[3];
/* Rope: binary tree of string fragments for cheap concatenation; a
 * leaf holds `data`, inner nodes concatenate left and right. */
struct Ropeobj178006 {
TNimObject Sup;
Ropeobj178006* left;
Ropeobj178006* right;
NI length;
NimStringDesc* data;
};
typedef NU8 Tinfoccprop273004Set;
/* Static description of one supported C compiler; a Nim tuple lowered
 * to positional fields (presumably name and command/option template
 * strings — confirm against the compiler's extccomp module). */
struct Tinfocc273008 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
NimStringDesc* Field12;
NimStringDesc* Field13;
NimStringDesc* Field14;
NimStringDesc* Field15;
NimStringDesc* Field16;
NimStringDesc* Field17;
NimStringDesc* Field18;
NimStringDesc* Field19;
Tinfoccprop273004Set Field20;
};
typedef Tinfocc273008 TY273427[13];
typedef NU8 Tsystemcc273002;
typedef NU8 Tnodeflag292427;
typedef NU8 Tcprocsection529011;
typedef Ropeobj178006* Tcprocsections529013[3];
/* One open `block`/loop scope inside a proc being generated. */
struct Tblock529019 {
NI id;
Ropeobj178006* label;
Tcprocsections529013 sections;
NIM_BOOL isloop;
NI16 nestedtrystmts;
NI16 nestedexceptstmts;
NI16 framelen;
};
typedef NU8 Tgcmode169080;
typedef NU8 Ttypeinforeason537016;
/* Context threaded through the GC-traversal proc generators
 * (gentraverseproc_* declared below). */
struct Ttraversalclosure537019 {
Tcproc529021* p;
NimStringDesc* visitorfrmt;
};
typedef NU8 Ttypefieldresult320145;
typedef NU8 Tinfoccprop273004;
/* Fixed-size rope/string argument arrays for the format helpers. */
typedef Ropeobj178006* TY536847[6];
typedef Ropeobj178006* TY536401[7];
typedef Ropeobj178006* TY536475[5];
typedef NU16 Tmsgkind191002;
typedef NU8 Tassignmentflag538302Set;
typedef NU8 Tassignmentflag538302;
typedef NimStringDesc* TY552655[19];
typedef NimStringDesc* TY551642[3];
typedef NimStringDesc* TY556765[4];
typedef NimStringDesc* TY551828[42];
typedef NimStringDesc* TY551281[7];
typedef NU8 Trenderflag311004Set;
typedef NimStringDesc* TY557052[2];
typedef NU8 Tclosuretypekind535681;
typedef NimStringDesc* TY556428[6];
typedef NU8 Tanalysisresult473003;
typedef NU8 char136Set[32];
typedef NU8 Tdistinctcompare324427;
typedef NU8 Ttypecmpflag324429Set;
typedef NU16 Tspecialword275003;
typedef NU8 Tsystemos176004;
/* Per-source-file info: full/project-relative paths, cached quoted
 * names for codegen, and the file's lines. */
struct Tfileinfo191334 {
NimStringDesc* fullpath;
NimStringDesc* projpath;
NimStringDesc* shortname;
Ropeobj178006* quotedname;
Ropeobj178006* quotedfullname;
TY191350* lines;
NimStringDesc* dirtyfile;
};
typedef NU8 Tinfoosprop176031Set;
/* Static description of one target OS (Nim tuple lowered to positional
 * string fields plus a property set). */
struct Tinfoos176035 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
Tinfoosprop176031Set Field12;
};
typedef Tinfoos176035 TY176082[24];
typedef NU8 Tendian176474;
/* Static description of one target CPU (positional tuple fields;
 * Field2 is the endianness). */
struct Tinfocpu176476 {
NimStringDesc* Field0;
NI Field1;
Tendian176474 Field2;
NI Field3;
NI Field4;
};
typedef Tinfocpu176476 TY176510[19];
typedef NU8 Tsystemcpu176452;
/* Linked-list entry carrying a string payload. */
struct Tstrentry147009 {
Tlistentry147007 Sup;
NimStringDesc* data;
};
/* Anonymous three-string tuple lowered to positional fields. */
struct TY128506 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
};
/* One stack segment scanned by the GC (doubly linked). */
struct Gcstack49816 {
Gcstack49816* prev;
Gcstack49816* next;
void* starts;
void* pos;
NI maxstacksize;
};
/* ---- Allocator chunk and bitset-trunk structures ---- */
/* Common chunk header: previous/current chunk size and a used flag. */
struct Basechunk29438 {
NI prevsize;
NI size;
NIM_BOOL used;
};
/* Chunk subdivided into small fixed-size cells kept on `freelist`. */
struct Smallchunk29440 {
Basechunk29438 Sup;
Smallchunk29440* next;
Smallchunk29440* prev;
Freecell29430* freelist;
NI free;
NI acc;
NF data;          /* start of the payload, aligned via NF */
};
/* Low-level chunk for the allocator's own metadata. */
struct Llchunk29480 {
NI size;
NI acc;
Llchunk29480* next;
};
/* Chunk holding a single large object. */
struct Bigchunk29442 {
Basechunk29438 Sup;
Bigchunk29442* next;
Bigchunk29442* prev;
NI align;
NF data;          /* start of the payload, aligned via NF */
};
typedef NI TY29419[8];
/* Bucketed bitset trunk: chained on `next`, keyed page, word-array of
 * bits.  Pagedesc47313/Trunk268026 below share the same shape. */
struct Trunk29410 {
Trunk29410* next;
NI key;
TY29419 bits;
};
typedef Avlnode29484* TY29491[2];
/* AVL tree node for the allocator's address-range index. */
struct Avlnode29484 {
TY29491 link;     /* left/right children */
NI key;
NI upperbound;
NI level;
};
struct Pagedesc47313 {
Pagedesc47313* next;
NI key;
TY29419 bits;
};
struct Trunk268026 {
Trunk268026* next;
NI key;
TY29419 bits;
};
/* Key/value entry of a Tidtable292850. */
struct Tidpair292846 {
Tidobj199004* key;
TNimObject* val;
};
/* Key/value entry of a Tnodetable292862; `h` caches the node's hash. */
struct Tnodepair292858 {
NI h;
Tnode292802* key;
NI val;
};
/* Debug-info mapping of (package, file) to its mangled 32-bit id. */
struct Filenamemapping203005 {
NimStringDesc* package;
NimStringDesc* file;
NU32 mangled;
};
typedef NU8 Treasonforrecompile332002;
/* Int-to-int hash table. */
struct Tiitable299142 {
NI counter;
Tiipairseq299140* data;
};
/* rod-file index: id remap table plus the raw index text `r` and its
 * running offset. */
struct Tindex332019 {
NI lastidxkey;
NI lastidxval;
Tiitable299142 tab;
NimStringDesc* r;
NI offset;
};
/* Symbol table of a rod-file reader. */
struct Table332054 {
Keyvaluepairseq332057* data;
NI counter;
};
/* Memory-mapped file: base address, size and OS handle. */
struct Memfile330202 {
void* mem;
NI size;
int handle;
};
/* Reader state for a rod (compilation-cache) file: raw buffer `s` with
 * cursor `pos`, per-section start indices, the symbol table `syms` and
 * the backing memory-mapped file. */
struct Trodreader332021 {
TNimObject Sup;
NI pos;                     /* read cursor into `s` */
NCSTRING s;                 /* raw file contents */
Toption169009Set options;
Treasonforrecompile332002 reason;
TY332033* moddeps;
TY332033* files;
NI dataidx;
NI convertersidx;
NI initidx;
NI interfidx;
NI compilerprocsidx;
NI methodsidx;
NimStringDesc* filename;
Tindex332019 index;
Tindex332019 imports;
NI readerindex;
NI line;
NI moduleid;
Table332054 syms;
Memfile330202 memfile;
Tsymseq292804* methods;
NimStringDesc* origfile;
NIM_BOOL inviewmode;
};
/* Anonymous (int, sym) tuple lowered to positional fields. */
struct TY292961 {
NI Field0;
Tsym292834* Field1;
};
/* Free cell on a small chunk's free list. */
struct Freecell29430 {
Freecell29430* next;
NI zerofield;
};
/* Cached generic instantiation: the instantiated symbol plus the
 * concrete type arguments it was created with. */
struct Tinstantiation292824 {
Tsym292834* sym;
Ttypeseq292836* concretetypes;
NI compilesid;
};
/* Int-to-int pair for Tiitable299142. */
struct Tiipair299138 {
NI key;
NI val;
};
/* Entry of Table332054 (two ints plus a symbol, positional fields). */
struct Keyvaluepair332060 {
NI Field0;
NI Field1;
Tsym292834* Field2;
};
/* ---- Generated seq instantiations: each is the common TGenericSeq
 * header followed by the element payload (SEQ_DECL_SIZE is the
 * flexible-array idiom of Nim's codegen). ---- */
struct Ttypeseq292836 {              /* seq[PType] */
TGenericSeq Sup;
Ttype292840* data[SEQ_DECL_SIZE];
};
struct TY529153 {                    /* seq of module generators */
TGenericSeq Sup;
Tcgen529027* data[SEQ_DECL_SIZE];
};
struct Tsymseq292804 {               /* seq[PSym] */
TGenericSeq Sup;
Tsym292834* data[SEQ_DECL_SIZE];
};
struct TY203017 {
TGenericSeq Sup;
TY203018 data[SEQ_DECL_SIZE];
};
struct TY135002 {                    /* seq[string] */
TGenericSeq Sup;
NimStringDesc* data[SEQ_DECL_SIZE];
};
struct Tbitset339004 {               /* seq[int8] used as a bitset */
TGenericSeq Sup;
NI8 data[SEQ_DECL_SIZE];
};
struct TY529095 {                    /* seq of open blocks */
TGenericSeq Sup;
Tblock529019 data[SEQ_DECL_SIZE];
};
struct TY191350 {                    /* seq[Rope] */
TGenericSeq Sup;
Ropeobj178006* data[SEQ_DECL_SIZE];
};
struct Tnodeseq292796 {              /* seq[PNode] */
TGenericSeq Sup;
Tnode292802* data[SEQ_DECL_SIZE];
};
struct TY191612 {                    /* seq of file infos */
TGenericSeq Sup;
Tfileinfo191334 data[SEQ_DECL_SIZE];
};
struct Trunkseq268028 {
TGenericSeq Sup;
Trunk268026* data[SEQ_DECL_SIZE];
};
struct TY292929 {                    /* seq of generic instantiations */
TGenericSeq Sup;
Tinstantiation292824* data[SEQ_DECL_SIZE];
};
struct Tidpairseq292848 {
TGenericSeq Sup;
Tidpair292846 data[SEQ_DECL_SIZE];
};
struct Tnodepairseq292860 {
TGenericSeq Sup;
Tnodepair292858 data[SEQ_DECL_SIZE];
};
struct TY203021 {
TGenericSeq Sup;
Filenamemapping203005 data[SEQ_DECL_SIZE];
};
struct TY203023 {
TGenericSeq Sup;
Enumdesc203007 data[SEQ_DECL_SIZE];
};
struct TY292960 {
TGenericSeq Sup;
TY292961 data[SEQ_DECL_SIZE];
};
struct TY332033 {                    /* seq[int32] */
TGenericSeq Sup;
NI32 data[SEQ_DECL_SIZE];
};
struct Tiipairseq299140 {
TGenericSeq Sup;
Tiipair299138 data[SEQ_DECL_SIZE];
};
struct Keyvaluepairseq332057 {
TGenericSeq Sup;
Keyvaluepair332060 data[SEQ_DECL_SIZE];
};
/* ---- Forward declarations (prototypes only; definitions live later in
 * this generated file or in other generated modules).  Nim's codegen
 * declares every runtime helper and cross-module proc it references.
 * The first group covers GC/allocation runtime entry points; the rest
 * are compiler procs (ropes, type descriptors, proc/var generation,
 * assignments, literals, typeinfo and dynlib support). ---- */
N_NIMCALL(void, nimGCvisit)(void* d0, NI op0);
N_NIMCALL(void, T839829468_2)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0);
N_NIMCALL(void, T839829468_3)(void);
N_NIMCALL(Ropeobj178006*, rope_178277_2381377266)(NimStringDesc* s0);
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0);
static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0);
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0);
N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47321* s0, Cell47305* c0);
N_NIMCALL(void, T839829468_5)(void);
N_NIMCALL(void, T839829468_6)(void);
static N_INLINE(void, nimGCunrefNoCycle)(void* p0);
N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0);
N_NIMCALL(void, T839829468_7)(void);
N_NIMCALL(void, initintset_268885_2627731572)(Intset268030* Result);
N_NOINLINE(void, chckNil)(void* p0);
N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0);
N_NIMCALL(void, T839829468_8)(void);
N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0);
N_NIMCALL(void, internalerror_196113_155036129)(NimStringDesc* errmsg0);
N_NIMCALL(NimStringDesc*, HEX24_196185_1689653243)(TY203018 x0);
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0);
N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0);
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0);
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0);
N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0);
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0);
N_NIMCALL(NU32, register_203121_1926258066)(Debuginfo203009* self0, NimStringDesc* package0, NimStringDesc* file0);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space0);
N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0);
N_NIMCALL(void, initidtable_296019_850551059)(Tidtable292850* x0);
N_NIMCALL(Tcproc529021*, newproc_529206_3723162438)(Tsym292834* prc0, Tcgen529027* module0);
static N_INLINE(void, asgnRef)(void** dest0, void* src0);
static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0);
static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0);
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0);
N_NIMCALL(void, initnodetable_296085_850551059)(Tnodetable292862* x0);
N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, HEX26_178418_2381377266)(Ropeobj178006* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rope_178401_2381377266)(NI64 i0);
N_NIMCALL(NimStringDesc*, tofullpath_192261_155036129)(NI32 fileidx0);
N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0);
N_NIMCALL(NimStringDesc*, tofilename_192257_155036129)(NI32 fileidx0);
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0);
N_NIMCALL(NimStringDesc*, completecfilepath_273854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0);
N_NIMCALL(void, readmergeinfo_530613_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0);
N_NIMCALL(NimStringDesc*, withpackagename_170073_2607990831)(NimStringDesc* path0);
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0);
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0);
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0);
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0);
N_NIMCALL(NimStringDesc*, mangle_528847_2036603609)(NimStringDesc* name0);
N_NIMCALL(void, add_178487_2381377266)(Ropeobj178006** a0, NimStringDesc* b0);
N_NIMCALL(void, add_178482_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, HEX25_178905_2381377266)(NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0);
N_NIMCALL(NimStringDesc*, getstr_297230_850551059)(Tnode292802* a0);
N_NIMCALL(Tsym292834*, getmodule_299123_2984716966)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, containsorincl_268862_2627731572)(Intset268030* s0, NI key0);
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0);
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0);
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0);
N_NIMCALL(Tsym292834*, getcompilerproc_338748_3937434831)(NimStringDesc* name0);
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0);
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0);
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0);
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0);
N_NIMCALL(void, addf_179205_2381377266)(Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, makesinglelinecstring_528835_2036603609)(NimStringDesc* s0);
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0);
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0);
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0);
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0);
N_NIMCALL(Tctypekind529007, maptype_533394_839829468)(Ttype292840* typ0);
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0);
N_NIMCALL(NI64, getsize_320135_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ttype292840*, lastson_295377_850551059)(Ttype292840* n0);
N_NIMCALL(NI64, firstord_320001_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, skiptypes_296099_850551059)(Ttype292840* t0, Ttypekind292244Set kinds0);
N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_320117_3876443242)(Ttype292840* typ0);
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, ispureobject_320138_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ttype292840*, getuniquetype_528640_2036603609)(Ttype292840* key0);
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0);
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0);
N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0);
N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0);
N_NIMCALL(TNimObject*, idtableget_299086_2984716966)(Tidtable292850 t0, Tidobj199004* key0);
N_NIMCALL(NimStringDesc*, typetostring_320017_3876443242)(Ttype292840* typ0, Tprefereddesc320011 prefer0);
N_NIMCALL(Ttype292840*, elemtype_320394_3876443242)(Ttype292840* t0);
N_NIMCALL(Ropeobj178006*, HEX26_178447_2381377266)(Ropeobj178006* a0, NimStringDesc* b0);
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0);
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0);
N_NIMCALL(void, idtableput_299094_2984716966)(Tidtable292850* t0, Tidobj199004* key0, TNimObject* val0);
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0);
N_NIMCALL(void, internalerror_196100_155036129)(Tlineinfo191336 info0, NimStringDesc* errmsg0);
N_NIMCALL(NIM_BOOL, hasenum_203230_1926258066)(Debuginfo203009* self0, NimStringDesc* ename0, NI id0, NU32 owner0);
N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0);
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0);
N_NIMCALL(void, registerenum_203419_1926258066)(Debuginfo203009* self0, Enumdesc203007* ed0);
N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0);
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NI64, lengthord_320007_3876443242)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0);
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0);
N_NIMCALL(NI, len_295339_850551059)(Ttype292840* n0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0);
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295351_850551059)(Tnode292802* n0);
N_NIMCALL(Tnode292802*, lastson_295364_850551059)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, HEX26_178452_2381377266)(NimStringDesc* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0);
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0);
N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0);
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295327_850551059)(Ttype292840* n0);
N_NIMCALL(void, excl_268841_2627731572)(Intset268030* s0, NI key0);
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0);
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0);
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0);
N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0);
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0);
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0);
N_NIMCALL(Tnode292802*, getbody_335226_1724185294)(Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0);
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0);
N_NIMCALL(void, prepend_178893_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0);
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0);
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, makecstring_191638_155036129)(NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468);
N_NIMCALL(Tcgen529027*, bmod_529201_3723162438)(Tsym292834* module0);
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0);
N_NIMCALL(NIM_BOOL, canformacycle_320123_3876443242)(Ttype292840* typ0);
N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0);
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0);
N_NIMCALL(Ttype292840*, newtype_295107_850551059)(Ttypekind292244 kind0, Tsym292834* owner0);
N_NIMCALL(void, rawaddson_296394_850551059)(Ttype292840* father0, Ttype292840* son0);
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0);
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0);
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0);
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0);
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0);
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0);
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0);
N_NIMCALL(Ttypefieldresult320145, analyseobjectwithtypefield_320149_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, getsystype_338150_3937434831)(Ttypekind292244 kind0);
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468);
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0);
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0);
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0);
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0);
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0);
N_NIMCALL(NI, nodetabletestorset_342682_1142335848)(Tnodetable292862* t0, Tnode292802* key0, NI val0);
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, tostrmaxprecision_298007_3471544153)(NF f0);
N_NIMCALL(Tnode292802*, copynode_296528_850551059)(Tnode292802* src0);
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0);
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0);
N_NIMCALL(Tsym292834*, lookupinrecord_299119_2984716966)(Tnode292802* n0, Tident199010* field0);
N_NIMCALL(NI64, getordvalue_320129_3876443242)(Tnode292802* n0);
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0);
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0);
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0);
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0);
N_NIMCALL(Ropeobj178006*, quotedfilename_196818_155036129)(Tlineinfo191336 i0);
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0);
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0);
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0);
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0);
N_NIMCALL(void, libcandidates_170605_2607990831)(NimStringDesc* s0, TY135002** dest0);
N_NIMCALL(void, rawmessage_194612_155036129)(Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0);
N_NIMCALL(NimStringDesc*, HEX24_178856_2381377266)(Ropeobj178006* r0);
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0);
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void);
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0);
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void);
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result);
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0);
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0);
N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0);
N_NIMCALL(void, localerror_196085_155036129)(Tlineinfo191336 info0, NimStringDesc* arg0);
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0);
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0);
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0);
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, tobitset_340001_452470228)(Tnode292802* s0, Tbitset339004** b0);
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0);
N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0);
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0);
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0);
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0);
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0);
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0);
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, sourceline_192065_155036129)(Tlineinfo191336 i0);
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0);
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0);
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0);
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0);
N_NIMCALL(NI64, lastord_320004_3876443242)(Ttype292840* t0);
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0);
N_NIMCALL(void, message_196095_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(NimStringDesc*, rendertree_311044_382274130)(Tnode292802* n0, Trenderflag311004Set renderflags0);
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0);
N_NIMCALL(void, globalerror_196071_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0);
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468);
N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0);
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0);
N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0);
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0);
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0);
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0);
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, skipconv_328882_3876443242)(Tnode292802* n0);
N_NIMCALL(Tmagic292524, getmagic_318502_2616423590)(Tnode292802* op0);
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0);
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0);
N_NIMCALL(Tanalysisresult473003, ispartof_473340_788060399)(Tnode292802* a0, Tnode292802* b0);
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0);
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0);
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0);
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0);
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468);
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0);
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0);
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0);
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0);
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0);
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0);
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, localerror_196080_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Tnode292802*, wrapprocforspawn_435501_2218250499)(Tsym292834* owner0, Tnode292802* spawnexpr0, Ttype292840* rettype0, Tnode292802* barrier0, Tnode292802* dest0);
N_NIMCALL(Tnode292802*, liftparallel_478822_1773027539)(Tsym292834* owner0, Tnode292802* n0);
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0);
N_NIMCALL(NIM_BOOL, isdeepconstexpr_318566_2616423590)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0);
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0);
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0);
N_NIMCALL(Tnode292802*, newstrnode_293677_850551059)(Tnodekind292020 kind0, NimStringDesc* strval0);
N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, comparetypes_326214_3876443242)(Ttype292840* x0, Ttype292840* y0, Tdistinctcompare324427 cmp0, Ttypecmpflag324429Set flags0);
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0);
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isconstexpr_318510_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0);
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0);
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0);
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0);
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0);
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, inheritancediff_326252_3876443242)(Ttype292840* a0, Ttype292840* b0);
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0);
N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0);
static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0);
N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0);
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0);
N_NIMCALL(NIM_BOOL, stmtscontainpragma_528083_2036603609)(Tnode292802* n0, Tspecialword275003 w0);
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0);
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0);
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0);
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, containshiddenpointer_320120_3876443242)(Ttype292840* typ0);
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0);
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, lowertupleunpacking_433037_2218250499)(Tnode292802* n0, Tsym292834* owner0);
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NIM_BOOL, containscompiletimeonly_328721_3876443242)(Ttype292840* t0);
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0);
N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0);
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0);
N_NIMCALL(NI64, hashstring_528100_2036603609)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0);
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0);
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0);
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0);
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0);
N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0);
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0);
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0);
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0);
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0);
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0);
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0);
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0);
N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0);
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isdefined_200011_1967573533)(NimStringDesc* symbol0);
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0);
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0);
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0);
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0);
N_NIMCALL(Tspecialword275003, whichpragma_318911_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0);
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tsym292834*, skipgenericowner_297280_850551059)(Tsym292834* s0);
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0);
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0);
N_NIMCALL(Tnode292802*, generatemethoddispatchers_432151_3853300031)(void);
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0);
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0);
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, mergerequired_530832_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, mergefiles_531241_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530081_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionend_530116_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530015_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(Ropeobj178006*, gensectionend_530050_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, getcompilecfilecmd_274284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0);
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0);
N_NIMCALL(Ropeobj178006*, genmergeinfo_530203_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0);
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0);
N_NIMCALL(void, writerope_178836_2381377266)(Ropeobj178006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0);
N_NIMCALL(void, addfiletocompile_273863_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, addfiletolink_273872_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0);
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, toobjfile_273859_2528170400)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, writeropeifnotequal_179511_2381377266)(Ropeobj178006* r0, NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0);
N_NIMCALL(void, writemapping_274789_2528170400)(Ropeobj178006* gsymbolmapping0);
N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result);
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0);
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0);
STRING_LITERAL(T839829468_4, "\011", 1);
STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17);
NIM_CONST TY203018 T839829468_9 = {((NimStringDesc*) &T839829468_10),
((NI) 1158)}
;
STRING_LITERAL(T839829468_11, "T", 1);
STRING_LITERAL(T839829468_12, "_", 1);
STRING_LITERAL(T839829468_13, "added pending module twice: ", 28);
STRING_LITERAL(T839829468_14, ".h", 2);
STRING_LITERAL(T839829468_15, ".cpp", 4);
STRING_LITERAL(T839829468_16, ".m", 2);
STRING_LITERAL(T839829468_17, ".c", 2);
STRING_LITERAL(T839829468_18, "0", 1);
STRING_LITERAL(T839829468_19, "$", 1);
STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30);
STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15);
STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13);
STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13);
STRING_LITERAL(T839829468_24, "static ", 7);
STRING_LITERAL(T839829468_25, "mapType", 7);
STRING_LITERAL(T839829468_26, "void", 4);
STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24);
STRING_LITERAL(T839829468_28, "TY", 2);
STRING_LITERAL(T839829468_29, "getTypeName: ", 13);
STRING_LITERAL(T839829468_30, "void*", 5);
STRING_LITERAL(T839829468_31, "NimStringDesc", 13);
STRING_LITERAL(T839829468_32, "NimStringDesc*", 14);
STRING_LITERAL(T839829468_33, "NCSTRING", 8);
STRING_LITERAL(T839829468_34, "NIM_BOOL", 8);
STRING_LITERAL(T839829468_35, "NIM_CHAR", 8);
STRING_LITERAL(T839829468_36, "NI", 2);
STRING_LITERAL(T839829468_37, "NI8", 3);
STRING_LITERAL(T839829468_38, "NI16", 4);
STRING_LITERAL(T839829468_39, "NI32", 4);
STRING_LITERAL(T839829468_40, "NI64", 4);
STRING_LITERAL(T839829468_41, "NF", 2);
STRING_LITERAL(T839829468_42, "NF32", 4);
STRING_LITERAL(T839829468_43, "NF64", 4);
STRING_LITERAL(T839829468_44, "NF128", 5);
STRING_LITERAL(T839829468_45, "NU", 2);
STRING_LITERAL(T839829468_46, "NU8", 3);
STRING_LITERAL(T839829468_47, "NU16", 4);
STRING_LITERAL(T839829468_48, "NU32", 4);
STRING_LITERAL(T839829468_49, "NU64", 4);
NIM_CONST TY533943 Numericaltypetostr_533941_839829468 = {((NimStringDesc*) &T839829468_36),
((NimStringDesc*) &T839829468_37),
((NimStringDesc*) &T839829468_38),
((NimStringDesc*) &T839829468_39),
((NimStringDesc*) &T839829468_40),
((NimStringDesc*) &T839829468_41),
((NimStringDesc*) &T839829468_42),
((NimStringDesc*) &T839829468_43),
((NimStringDesc*) &T839829468_44),
((NimStringDesc*) &T839829468_45),
((NimStringDesc*) &T839829468_46),
((NimStringDesc*) &T839829468_47),
((NimStringDesc*) &T839829468_48),
((NimStringDesc*) &T839829468_49)}
;
STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30);
STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28);
STRING_LITERAL(T839829468_52, "&", 1);
STRING_LITERAL(T839829468_53, "*", 1);
STRING_LITERAL(T839829468_54, "$1 $2;$n", 8);
STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(T839829468_56, "union", 5);
STRING_LITERAL(T839829468_57, "struct", 6);
STRING_LITERAL(T839829468_58, "getTypeForward(", 15);
STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18);
STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17);
STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18);
STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18);
STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20);
STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(T839829468_65, "N_NIMCALL", 9);
STRING_LITERAL(T839829468_66, "N_STDCALL", 9);
STRING_LITERAL(T839829468_67, "N_CDECL", 7);
STRING_LITERAL(T839829468_68, "N_SAFECALL", 10);
STRING_LITERAL(T839829468_69, "N_SYSCALL", 9);
STRING_LITERAL(T839829468_70, "N_INLINE", 8);
STRING_LITERAL(T839829468_71, "N_NOINLINE", 10);
STRING_LITERAL(T839829468_72, "N_FASTCALL", 10);
STRING_LITERAL(T839829468_73, "N_CLOSURE", 9);
STRING_LITERAL(T839829468_74, "N_NOCONV", 8);
NIM_CONST TY292016 Callingconvtostr_533587_839829468 = {((NimStringDesc*) &T839829468_65),
((NimStringDesc*) &T839829468_66),
((NimStringDesc*) &T839829468_67),
((NimStringDesc*) &T839829468_68),
((NimStringDesc*) &T839829468_69),
((NimStringDesc*) &T839829468_70),
((NimStringDesc*) &T839829468_71),
((NimStringDesc*) &T839829468_72),
((NimStringDesc*) &T839829468_73),
((NimStringDesc*) &T839829468_74)}
;
STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}"
" $1;$n", 69);
STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28);
STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34);
STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31);
STRING_LITERAL(T839829468_79, "TGenericSeq", 11);
STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39);
STRING_LITERAL(T839829468_82, "<", 1);
STRING_LITERAL(T839829468_83, " COMMA ", 7);
STRING_LITERAL(T839829468_84, "> ", 2);
extern NIM_CONST TY273427 Cc_273413_2528170400;
STRING_LITERAL(T839829468_85, " {$n", 4);
STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24);
STRING_LITERAL(T839829468_87, " : public $1 {$n", 16);
STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15);
STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18);
STRING_LITERAL(T839829468_90, "$1.$2", 5);
STRING_LITERAL(T839829468_91, "S", 1);
STRING_LITERAL(T839829468_92, "struct {", 8);
STRING_LITERAL(T839829468_93, "} $1;$n", 7);
STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38);
STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17);
STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18);
STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11);
STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20);
STRING_LITERAL(T839829468_100, "char dummy;$n", 13);
STRING_LITERAL(T839829468_101, "};", 2);
STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9);
STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13);
STRING_LITERAL(T839829468_104, "char dummy;", 11);
STRING_LITERAL(T839829468_105, "Set", 3);
STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15);
STRING_LITERAL(T839829468_109, "genProcParams", 13);
STRING_LITERAL(T839829468_110, ", ", 2);
STRING_LITERAL(T839829468_111, " ", 1);
STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12);
STRING_LITERAL(T839829468_113, " Result", 7);
STRING_LITERAL(T839829468_114, "void* ClEnv", 11);
STRING_LITERAL(T839829468_115, "...", 3);
STRING_LITERAL(T839829468_116, "void)", 5);
STRING_LITERAL(T839829468_117, ")", 1);
STRING_LITERAL(T839829468_118, "(", 1);
STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12);
STRING_LITERAL(T839829468_120, "proc has no result symbol", 25);
STRING_LITERAL(T839829468_121, " register", 9);
STRING_LITERAL(T839829468_122, " volatile", 9);
STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10);
STRING_LITERAL(T839829468_124, "(*$1)", 5);
STRING_LITERAL(T839829468_125, ";", 1);
STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name "
"= $2;$n", 70);
STRING_LITERAL(T839829468_127, "NTI$1", 5);
STRING_LITERAL(T839829468_128, "(&", 2);
STRING_LITERAL(T839829468_129, "TNimType", 8);
STRING_LITERAL(T839829468_130, "TNimNode", 8);
STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30);
STRING_LITERAL(T839829468_132, "0", 1);
STRING_LITERAL(T839829468_133, "void*", 5);
STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16);
STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23);
STRING_LITERAL(T839829468_137, "genTypeInfo(", 12);
STRING_LITERAL(T839829468_138, "$1[$2]", 6);
STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16);
STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29);
STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35);
STRING_LITERAL(T839829468_147, "$1 a;$n", 7);
STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12);
STRING_LITERAL(T839829468_149, "LOC", 3);
STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13);
STRING_LITERAL(T839829468_151, "<string.h>", 10);
STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35);
STRING_LITERAL(T839829468_153, ".Sup", 4);
STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17);
STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22);
STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35);
STRING_LITERAL(T839829468_157, "len", 3);
STRING_LITERAL(T839829468_158, "Sup.len", 7);
STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(T839829468_160, "}$n", 3);
STRING_LITERAL(T839829468_161, "$1.Sup", 6);
STRING_LITERAL(T839829468_162, "genTraverseProc", 15);
STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18);
STRING_LITERAL(T839829468_164, "case $1 ... $2:$n", 17);
STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21);
STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16);
STRING_LITERAL(T839829468_167, "IL64($1)", 8);
STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(T839829468_169, "NIM_TRUE", 8);
STRING_LITERAL(T839829468_170, "NIM_FALSE", 9);
STRING_LITERAL(T839829468_171, "ULL", 3);
STRING_LITERAL(T839829468_172, "(($1) $2)", 9);
STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(T839829468_174, "NIM_NIL", 7);
STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27);
STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23);
STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25);
STRING_LITERAL(T839829468_179, "genLiteral(", 11);
STRING_LITERAL(T839829468_180, "case $1:$n", 10);
STRING_LITERAL(T839829468_181, "default:$n", 10);
STRING_LITERAL(T839829468_182, "break;$n", 8);
STRING_LITERAL(T839829468_183, "} $n", 4);
STRING_LITERAL(T839829468_184, "genTraverseProc()", 17);
STRING_LITERAL(T839829468_185, "$1.Field$2", 10);
STRING_LITERAL(T839829468_186, "$1.ClEnv", 8);
STRING_LITERAL(T839829468_187, "$1->data[$2]", 12);
STRING_LITERAL(T839829468_188, "a", 1);
STRING_LITERAL(T839829468_189, "(*a)", 4);
STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(T839829468_191, "$1;$n", 5);
STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17);
STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17);
STRING_LITERAL(T839829468_195, "NI $1;$n", 8);
STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(T839829468_198, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32);
STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11);
STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34);
STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26);
STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(T839829468_207, "genObjectFields", 15);
STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(T839829468_209, "\011return $1;$n", 13);
STRING_LITERAL(T839829468_210, "Result", 6);
STRING_LITERAL(T839829468_211, "closure generation failed", 25);
STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18);
STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21);
STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18);
STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19);
STRING_LITERAL(T839829468_216, "$N$1 {$N", 8);
STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22);
STRING_LITERAL(T839829468_218, "nimFrame", 8);
STRING_LITERAL(T839829468_219, "VarSlot", 7);
STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25);
STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16);
STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17);
STRING_LITERAL(T839829468_223, "{", 1);
STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16);
STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51);
STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15);
STRING_LITERAL(T839829468_227, "}$N", 3);
STRING_LITERAL(T839829468_228, "static void* $1;$n", 18);
STRING_LITERAL(T839829468_229, "||", 2);
STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47);
STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57);
STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60);
STRING_LITERAL(T839829468_233, "loadDynamicLib", 14);
STRING_LITERAL(T839829468_234, "Dl_$1", 5);
STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21);
NIM_CONST TY203018 T839829468_236 = {((NimStringDesc*) &T839829468_10),
((NI) 535)}
;
STRING_LITERAL(T839829468_237, "wrong index: ", 13);
STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_239, "$2 $1;$n", 8);
STRING_LITERAL(T839829468_240, "extern ", 7);
STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14);
STRING_LITERAL(T839829468_242, " $1;$n", 6);
STRING_LITERAL(T839829468_243, "cgsym: ", 7);
STRING_LITERAL(T839829468_244, ": ", 2);
STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15);
STRING_LITERAL(T839829468_246, "extern \"C\" ", 11);
STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23);
STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26);
STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28);
STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35);
STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34);
STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32);
STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23);
STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35);
STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33);
STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47);
STRING_LITERAL(T839829468_257, ".", 1);
STRING_LITERAL(T839829468_258, "ClEnv", 5);
STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22);
STRING_LITERAL(T839829468_260, "Field$1", 7);
STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53);
STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50);
STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43);
STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21);
NIM_CONST TY203018 T839829468_264 = {((NimStringDesc*) &T839829468_265),
((NI) 320)}
;
STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60);
STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63);
STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_269, "genAssignment: ", 15);
STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48);
STRING_LITERAL(T839829468_271, "expr: proc not init ", 20);
STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(T839829468_273, "{$n", 3);
STRING_LITERAL(T839829468_274, "0x$1,$n", 7);
STRING_LITERAL(T839829468_275, "0x$1, ", 6);
STRING_LITERAL(T839829468_276, "0x$1}$n", 7);
STRING_LITERAL(T839829468_277, "{{$1, $1}", 9);
STRING_LITERAL(T839829468_278, ", {", 3);
STRING_LITERAL(T839829468_279, ",$n", 3);
STRING_LITERAL(T839829468_280, "}", 1);
STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 ="
" $4;$n", 69);
STRING_LITERAL(T839829468_282, "(($1)&$2)", 9);
STRING_LITERAL(T839829468_283, "$1,$n", 5);
STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(T839829468_285, "expr: var not init ", 19);
STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24);
STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50);
STRING_LITERAL(T839829468_288, "NimTV->", 7);
STRING_LITERAL(T839829468_289, "expr: temp not init ", 20);
STRING_LITERAL(T839829468_290, "expr: param not init ", 21);
STRING_LITERAL(T839829468_291, "expr(", 5);
STRING_LITERAL(T839829468_292, "); unknown symbol", 17);
STRING_LITERAL(T839829468_293, "//", 2);
STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16);
STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16);
STRING_LITERAL(T839829468_296, "LA", 2);
STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18);
STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(T839829468_299, "$1: ;$n", 7);
STRING_LITERAL(T839829468_300, "!($1)", 5);
STRING_LITERAL(T839829468_301, "$1", 2);
STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(T839829468_303, "-($1)", 5);
STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22);
STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19);
STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21);
STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20);
STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22);
STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22);
STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20);
STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19);
STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20);
STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22);
STRING_LITERAL(T839829468_314, "((double) ($1))", 15);
STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18);
STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18);
NIM_CONST TY552655 unarithtab_552653_839829468 = {((NimStringDesc*) &T839829468_300),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_302),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304),
((NimStringDesc*) &T839829468_305),
((NimStringDesc*) &T839829468_306),
((NimStringDesc*) &T839829468_307),
((NimStringDesc*) &T839829468_308),
((NimStringDesc*) &T839829468_309),
((NimStringDesc*) &T839829468_310),
((NimStringDesc*) &T839829468_311),
((NimStringDesc*) &T839829468_312),
((NimStringDesc*) &T839829468_313),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_315),
((NimStringDesc*) &T839829468_316)}
;
STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33);
STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13);
NIM_CONST TY551642 opr_551640_839829468 = {((NimStringDesc*) &T839829468_318),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304)}
;
STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22);
STRING_LITERAL(T839829468_320, "+", 1);
STRING_LITERAL(T839829468_321, "-", 1);
STRING_LITERAL(T839829468_322, "/", 1);
NIM_CONST TY556765 opr_556763_839829468 = {((NimStringDesc*) &T839829468_320),
((NimStringDesc*) &T839829468_321),
((NimStringDesc*) &T839829468_53),
((NimStringDesc*) &T839829468_322)}
;
STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16);
STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16);
STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13);
STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13);
STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13);
STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_335, "(($1 >= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(T839829468_341, "($1 == $2)", 10);
STRING_LITERAL(T839829468_342, "($1 <= $2)", 10);
STRING_LITERAL(T839829468_343, "($1 < $2)", 9);
STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(T839829468_351, "($1 != $2)", 10);
NIM_CONST TY551828 binarithtab_551826_839829468 = {((NimStringDesc*) &T839829468_325),
((NimStringDesc*) &T839829468_326),
((NimStringDesc*) &T839829468_327),
((NimStringDesc*) &T839829468_328),
((NimStringDesc*) &T839829468_329),
((NimStringDesc*) &T839829468_330),
((NimStringDesc*) &T839829468_331),
((NimStringDesc*) &T839829468_332),
((NimStringDesc*) &T839829468_333),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_336),
((NimStringDesc*) &T839829468_337),
((NimStringDesc*) &T839829468_338),
((NimStringDesc*) &T839829468_339),
((NimStringDesc*) &T839829468_340),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_344),
((NimStringDesc*) &T839829468_345),
((NimStringDesc*) &T839829468_346),
((NimStringDesc*) &T839829468_347),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_348),
((NimStringDesc*) &T839829468_349),
((NimStringDesc*) &T839829468_350),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_351)}
;
STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46);
STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13);
STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13);
STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13);
STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13);
STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13);
NIM_CONST TY551281 opr_551279_839829468 = {((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354),
((NimStringDesc*) &T839829468_355),
((NimStringDesc*) &T839829468_356),
((NimStringDesc*) &T839829468_357),
((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354)}
;
STRING_LITERAL(T839829468_358, "((NU8)($1))", 11);
STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43);
STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25);
NIM_CONST TY551281 prc64_551274_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361),
((NimStringDesc*) &T839829468_362),
((NimStringDesc*) &T839829468_363),
((NimStringDesc*) &T839829468_364),
((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23);
NIM_CONST TY551281 prc_551269_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366),
((NimStringDesc*) &T839829468_367),
((NimStringDesc*) &T839829468_368),
((NimStringDesc*) &T839829468_369),
((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_370, "($#)($#)", 8);
STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18);
STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14);
STRING_LITERAL(T839829468_373, "#reprBool($1)", 13);
STRING_LITERAL(T839829468_374, "#reprChar($1)", 13);
STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21);
STRING_LITERAL(T839829468_376, "#reprStr($1)", 12);
STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16);
STRING_LITERAL(T839829468_378, "$1, $1Len0", 10);
STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16);
STRING_LITERAL(T839829468_380, "$1, $2", 6);
STRING_LITERAL(T839829468_381, "genRepr()", 9);
STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22);
STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16);
STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34);
STRING_LITERAL(T839829468_385, "($1 - 1)", 8);
STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14);
STRING_LITERAL(T839829468_387, "binaryStmt", 10);
STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11);
STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11);
NIM_CONST TY557052 opr_557050_839829468 = {((NimStringDesc*) &T839829468_388),
((NimStringDesc*) &T839829468_389)}
;
NIM_CONST TY557052 fun64_557055_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
NIM_CONST TY557052 fun_557060_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22);
STRING_LITERAL(T839829468_391, "$1->$2 + ", 9);
STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24);
STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27);
STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24);
STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31);
STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47);
STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39);
STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16);
STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11);
STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23);
STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18);
STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26);
STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25);
STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13);
STRING_LITERAL(T839829468_405, "$1 == 0", 7);
STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16);
STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18);
STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17);
STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17);
STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18);
STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17);
STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43);
STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14);
STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15);
STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17);
STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34);
STRING_LITERAL(T839829468_418, "($1)", 4);
STRING_LITERAL(T839829468_419, "sizeof($1)", 10);
STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26);
STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, $3)", 23);
STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20);
STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28);
STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23);
STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20);
STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27);
STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16);
STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13);
STRING_LITERAL(T839829468_430, "(($1) ($2))", 11);
STRING_LITERAL(T839829468_431, "($1Len0-1)", 10);
STRING_LITERAL(T839829468_432, "$1Len0", 6);
STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26);
STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21);
STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27);
STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22);
STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23);
STRING_LITERAL(T839829468_438, "($1 ? $1->len : 0)", 18);
STRING_LITERAL(T839829468_439, "genArrayLen()", 13);
STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13);
STRING_LITERAL(T839829468_441, "$1->len", 7);
STRING_LITERAL(T839829468_442, "unaryStmt", 9);
STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16);
STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18);
STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29);
STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54);
STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46);
STRING_LITERAL(T839829468_448, "($1- $2)", 8);
STRING_LITERAL(T839829468_449, "$1 |= ((", 8);
STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19);
STRING_LITERAL(T839829468_451, ")*8));$n", 8);
STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10);
STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23);
STRING_LITERAL(T839829468_454, ")*8)));$n", 9);
STRING_LITERAL(T839829468_455, "#countBits32($1)", 16);
STRING_LITERAL(T839829468_456, "#countBits64($1)", 16);
STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29);
STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16);
STRING_LITERAL(T839829468_459, "($1 & $2)", 9);
STRING_LITERAL(T839829468_460, "($1 | $2)", 9);
STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11);
STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9);
STRING_LITERAL(T839829468_463, "fewCmps", 7);
STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(T839829468_465, "$1 == $2", 8);
STRING_LITERAL(T839829468_466, " || ", 4);
STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30);
STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31);
STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31);
STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(T839829468_472, "genSetOp()", 10);
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13);
STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$n", 88);
STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);"
"$n", 129);
STRING_LITERAL(T839829468_478, "|", 1);
STRING_LITERAL(T839829468_479, "& ~", 3);
STRING_LITERAL(T839829468_480, "^", 1);
NIM_CONST TY556428 lookupopr_556426_839829468 = {((NimStringDesc*) &T839829468_476),
((NimStringDesc*) &T839829468_477),
((NimStringDesc*) &T839829468_52),
((NimStringDesc*) &T839829468_478),
((NimStringDesc*) &T839829468_479),
((NimStringDesc*) &T839829468_480)}
;
STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16);
STRING_LITERAL(T839829468_482, ")==0)", 5);
STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(T839829468_484, "genSetOp", 8);
STRING_LITERAL(T839829468_485, "$1->data", 8);
STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22);
STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29);
STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26);
STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14);
STRING_LITERAL(T839829468_490, "", 0);
STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22);
STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20);
STRING_LITERAL(T839829468_493, "$1.ClEnv? $1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51);
STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9);
STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22);
STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31);
STRING_LITERAL(T839829468_497, ";$n", 3);
STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21);
NIM_CONST TY203018 T839829468_498 = {((NimStringDesc*) &T839829468_499),
((NI) 423)}
;
static NIM_CONST char136Set T839829468_500 = {
0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;
STRING_LITERAL(T839829468_501, "wrong argument count", 20);
STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40);
NIM_CONST TY203018 T839829468_503 = {((NimStringDesc*) &T839829468_499),
((NI) 328)}
;
STRING_LITERAL(T839829468_504, "->", 2);
STRING_LITERAL(T839829468_505, ");$n", 4);
STRING_LITERAL(T839829468_506, "[", 1);
NIM_CONST TY203018 T839829468_507 = {((NimStringDesc*) &T839829468_499),
((NI) 472)}
;
STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31);
STRING_LITERAL(T839829468_509, "Result: ", 8);
STRING_LITERAL(T839829468_510, "];$n", 4);
STRING_LITERAL(T839829468_511, "]", 1);
NIM_CONST TY203018 T839829468_512 = {((NimStringDesc*) &T839829468_265),
((NI) 925)}
;
STRING_LITERAL(T839829468_513, "<stdio.h>", 9);
STRING_LITERAL(T839829468_514, ", \"nil\"", 7);
STRING_LITERAL(T839829468_515, ", $1? ($1)->data:\"nil\"", 22);
STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15);
STRING_LITERAL(T839829468_517, "%s", 2);
STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17);
STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34);
STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62);
STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13);
STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14);
STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28);
STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39);
STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20);
STRING_LITERAL(T839829468_530, "$1 |=((", 7);
STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20);
STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21);
STRING_LITERAL(T839829468_533, "genObjConstr", 12);
STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52);
STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55);
STRING_LITERAL(T839829468_536, "LOC$1.source", 12);
STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38);
STRING_LITERAL(T839829468_538, "LOC$#.dest", 10);
STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46);
STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45);
STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12);
STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) #raiseIndexError();$n", 50);
STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_545, "genTupleElem", 12);
STRING_LITERAL(T839829468_546, ".Field$1", 8);
STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20);
STRING_LITERAL(T839829468_548, "genDeref ", 9);
STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17);
STRING_LITERAL(T839829468_550, "genRecordField 3", 16);
STRING_LITERAL(T839829468_551, ".$1", 3);
STRING_LITERAL(T839829468_552, "} $1: ;$n", 9);
STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13);
STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13);
STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(T839829468_556, "goto $1;$n", 10);
STRING_LITERAL(T839829468_557, "genIf()", 7);
STRING_LITERAL(T839829468_558, "->Sup", 5);
STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11);
STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34);
STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26);
STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21);
STRING_LITERAL(T839829468_563, "chckRangeF", 10);
STRING_LITERAL(T839829468_564, "chckRange64", 11);
STRING_LITERAL(T839829468_565, "chckRange", 9);
STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11);
STRING_LITERAL(T839829468_567, "closure to closure created", 26);
STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31);
STRING_LITERAL(T839829468_569, "while (1) {$n", 13);
STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51);
STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51);
STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50);
STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41);
STRING_LITERAL(T839829468_574, "TMP$1", 5);
STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23);
STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9);
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11);
STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15);
STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46);
STRING_LITERAL(T839829468_580, "TMP$#:$n", 8);
STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16);
STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37);
STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_585, "$2* $1;$n", 9);
STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34);
STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28);
STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25);
STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31);
STRING_LITERAL(T839829468_590, "$#($#);$n", 9);
STRING_LITERAL(T839829468_591, "$# = $#;$n", 10);
STRING_LITERAL(T839829468_592, "genVarTuple", 11);
STRING_LITERAL(T839829468_593, "genConstStmt", 12);
STRING_LITERAL(T839829468_594, "for statement not eliminated", 28);
STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34);
STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33);
STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21);
STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12);
STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9);
STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36);
STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24);
STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15);
STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23);
STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18);
STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25);
STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45);
STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17);
STRING_LITERAL(T839829468_609, "no loop to break", 16);
STRING_LITERAL(T839829468_610, "extern $1", 9);
STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62);
STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18);
STRING_LITERAL(T839829468_613, "\"", 1);
STRING_LITERAL(T839829468_614, "\\n\"\012", 4);
STRING_LITERAL(T839829468_615, "Exception", 9);
STRING_LITERAL(T839829468_616, "E_Base", 6);
STRING_LITERAL(T839829468_617, "try {$n", 7);
STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30);
STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26);
STRING_LITERAL(T839829468_620, "else ", 5);
STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26);
STRING_LITERAL(T839829468_622, "if ($1) ", 8);
STRING_LITERAL(T839829468_623, "throw;$n", 8);
STRING_LITERAL(T839829468_624, "<setjmp.h>", 10);
STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17);
STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22);
STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12);
STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33);
STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12);
STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39);
STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12);
STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34);
STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23);
STRING_LITERAL(T839829468_634, "else {$n", 8);
STRING_LITERAL(T839829468_635, "else", 4);
STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16);
STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46);
STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42);
STRING_LITERAL(T839829468_639, "if ($1) {$n", 11);
STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42);
STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39);
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22);
STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15);
STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14);
STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18);
STRING_LITERAL(T839829468_646, "bp", 2);
STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57);
STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47);
STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58);
STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21);
NIM_CONST TY203018 T839829468_650 = {((NimStringDesc*) &T839829468_651),
((NI) 145)}
;
STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12);
STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26);
STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24);
STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31);
STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39);
STRING_LITERAL(T839829468_657, "); unknown node kind", 20);
NIM_CONST TY203018 T839829468_658 = {((NimStringDesc*) &T839829468_651),
((NI) 1122)}
;
STRING_LITERAL(T839829468_659, "Init000", 7);
STRING_LITERAL(T839829468_660, "DatInit000", 10);
STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41);
STRING_LITERAL(T839829468_662, "\011$1();$N", 8);
STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa"
"in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N"
"imMainInner;$N$2\011(*inner)();$N}$N$N", 162);
STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N "
" HINSTANCE hPrevInstance, $N LP"
"STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program"
"_result;$N}$N$N", 206);
STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC"
"L(void, NimMain)(void) {$N\011void (*volatile inner)();$N\011PreMain()"
";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175);
STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N "
" LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC"
"ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175);
STRING_LITERAL(T839829468_667, "<windows.h>", 11);
STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59);
STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim"
"MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void"
" (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011("
"*inner)();$N}$N$N", 208);
STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48);
STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;"
"$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog"
"ram_result;$N}$N$N", 145);
STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21);
STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19);
STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26);
STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40);
STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa"
"in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner"
" = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168);
STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30);
STRING_LITERAL(T839829468_678, "still forwarded: ", 17);
STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42);
STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26);
STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26);
STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25);
STRING_LITERAL(T839829468_683, "}$N$N", 5);
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46);
STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(T839829468_686, "0.15.0", 6);
STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
extern NIM_CONST TY176082 Os_176068_4151366050;
extern NIM_CONST TY176510 Cpu_176496_4151366050;
STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22);
STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20);
STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15);
STRING_LITERAL(T839829468_692, "#include $1$N", 13);
STRING_LITERAL(T839829468_693, "extern \"C\"", 10);
STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(T839829468_695, "__$1__", 6);
STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17);
Tcgen529027* generatedheader_532201_839829468;
extern TNimType NTI529015; /* BModule */
Ropeobj178006* indent_532655_839829468;
extern TNimType NTI178004; /* Rope */
extern Gcheap49818 gch_49858_1689653243;
Ropeobj178006* nimtv_538656_839829468;
Ttypeseq292836* nimtvdeps_538674_839829468;
extern TNimType NTI292836; /* TTypeSeq */
Intset268030 nimtvdeclared_538675_839829468;
extern TNimType NTI268030; /* IntSet */
NI breakpointid_548860_839829468;
Ropeobj178006* gbreakpoints_548861_839829468;
extern TY529153* gmodules_529170_3723162438;
extern TNimType NTI529027; /* TCGen */
extern Debuginfo203009 gdebuginfo_203470_1926258066;
extern Toption169009Set goptions_169128_2607990831;
extern TNimType NTI292804; /* TSymSeq */
extern Tglobaloption169013Set gglobaloptions_169130_2607990831;
extern NimStringDesc* headerfile_169138_2607990831;
extern NimStringDesc* gprojectfull_169211_2607990831;
extern Tcommands169076 gcmd_169132_2607990831;
extern NI gerrorcounter_192069_155036129;
extern Ropeobj178006* rnl_178903_2381377266;
extern NI gforwardedprocscounter_529171_3723162438;
extern TNimType NTI292244; /* TTypeKind */
extern TNimType NTI203017; /* seq[(string, int)] */
extern Tsystemcc273002 ccompiler_273431_2528170400;
extern NimStringDesc* tnl_176644_4151366050;
extern NI floatsize_176642_4151366050;
extern Tgcmode169080 gselectedgc_169133_2607990831;
extern TNimType NTI292020; /* TNodeKind */
extern TNimType NTI135002; /* seq[string] */
extern TNimType NTI292435; /* TSymKind */
extern TNimType NTI292816; /* TLoc */
extern NI intsize_176641_4151366050;
extern TNimType NTI292524; /* TMagic */
extern TNimType NTI191350; /* seq[Rope] */
extern TNimType NTI292796; /* TNodeSeq */
extern Ropeobj178006* mainmodprocs_529148_3723162438;
extern Ropeobj178006* maindatinit_529151_3723162438;
extern Ropeobj178006* mainmodinit_529149_3723162438;
extern Ropeobj178006* othermodsinit_529150_3723162438;
extern Tsystemos176004 targetos_176629_4151366050;
extern TY191612* fileinfos_191629_155036129;
extern Tsystemcpu176452 targetcpu_176627_4151366050;
extern Ropeobj178006* gmapping_529152_3723162438;
/* Generated GC marker: lets the Nim garbage collector trace the
   `generatedheader` global (the BModule for the emitted .h file). */
N_NIMCALL(void, T839829468_2)(void) {
nimGCvisit((void*)generatedheader_532201_839829468, 0);
}
/* Generated GC marker for the module-level `indent` rope global. */
N_NIMCALL(void, T839829468_3)(void) {
nimGCvisit((void*)indent_532655_839829468, 0);
}
/* Translate a user-visible pointer into a pointer to its GC cell header.
   The allocator places the Cell47305 header immediately before the user
   data, so the header address is simply `usr - sizeof(Cell47305)`. */
static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0) {
Cell47305* cell0;
cell0 = ((Cell47305*) ((NI)((NU64)(((NI) (usr0))) - (NU64)(((NI)sizeof(Cell47305))))));
return cell0;
}
/* Push a cell whose reference count dropped below the live threshold onto
   the GC heap's zero-count table (ZCT) for later collection. */
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0) {
addzct_51417_1689653243((&gch_49858_1689653243.zct), c0);
}
/* Reference-counted assignment `*dest0 = src0` for acyclic data.
   Order matters: the new referent is incref'd BEFORE the old one is
   decref'd so that `*dest0 == src0` (self-assignment) stays safe.
   Counts step in units of 8; a count below 8 means "no owners left"
   and the cell is queued on the ZCT. */
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) {
{
Cell47305* c0;
/* incref the incoming reference, unless it is nil */
if (!!((src0 == NIM_NIL))) goto LA3;
c0 = usrtocell_51440_1689653243(src0);
(*c0).refcount += ((NI) 8);
}
LA3: ;
{
Cell47305* c0;
/* decref the previously stored reference, unless it was nil */
if (!!(((*dest0) == NIM_NIL))) goto LA7;
c0 = usrtocell_51440_1689653243((*dest0));
{
(*c0).refcount -= ((NI) 8);
if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA11;
rtladdzct_52601_1689653243(c0);
}
LA11: ;
}
LA7: ;
(*dest0) = src0;
}
/* Generated GC marker for the `nimtv` (thread-var section) rope global. */
N_NIMCALL(void, T839829468_5)(void) {
nimGCvisit((void*)nimtv_538656_839829468, 0);
}
/* Generated GC marker for the `nimtvDeps` type sequence global. */
N_NIMCALL(void, T839829468_6)(void) {
nimGCvisit((void*)nimtvdeps_538674_839829468, 0);
}
/* Drop one reference (a step of 8) from the cell that owns `p0`.
   When the count falls below 8 the object has no owners left and is
   queued on the ZCT via rtladdzct. Used for acyclic data only. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p0) {
Cell47305* c0;
c0 = usrtocell_51440_1689653243(p0);
{
(*c0).refcount -= ((NI) 8);
if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3;
rtladdzct_52601_1689653243(c0);
}
LA3: ;
}
/* Generated GC marker for `nimtvDeclared` (an IntSet): visits both of
   its heap-allocated fields, the hash-bucket chain and the data array. */
N_NIMCALL(void, T839829468_7)(void) {
nimGCvisit((void*)nimtvdeclared_538675_839829468.head, 0);
nimGCvisit((void*)nimtvdeclared_538675_839829468.data, 0);
}
/* Generated GC marker for the `gBreakpoints` rope global. */
N_NIMCALL(void, T839829468_8)(void) {
nimGCvisit((void*)gbreakpoints_548861_839829468, 0);
}
/* Look up the code-gen module (BModule) registered for symbol s0.
   Returns the entry at s0->position in the global gmodules sequence, or
   NIM_NIL when the position is out of range (including a nil sequence). */
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0) {
Tcgen529027* result0;
NI pos0;
NI len0;
result0 = (Tcgen529027*)0;
pos0 = (*s0).position;
len0 = (gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0);
if (((NI) 0) <= pos0 && pos0 < len0) {
result0 = gmodules_529170_3723162438->data[pos0];
} else {
result0 = NIM_NIL;
}
return result0;
}
/* Copy `size0` bytes from `source0` to `dest0`; the regions must not
   overlap (plain memcpy semantics).
   Fix: the original stored memcpy's return value in a local (LOC1) that
   was never read — the result is now simply discarded. */
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) {
memcpy(dest0, source0, ((size_t) (size0)));
}
/* Append src0 to dest0. Callers are expected to have reserved capacity
   beforehand (generated code allocates via rawNewString with the summed
   length — see e.g. the rawNewString(len + 2) call below). Copies
   src len + 1 bytes so the NUL terminator moves as well, then bumps the
   cached length. */
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) {
copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1)))));
(*dest0).Sup.len += (*src0).Sup.len;
}
/* Hash that identifies the owning module of s0: walk the owner chain up
   to the symbol of kind 6 (presumably skModule — confirm against the AST
   definitions), then register the (owner-of-module name, module name)
   pair with the debug info table and return the resulting id. */
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0) {
NU32 result0;
Tsym292834* module0;
Tsym292834* package0;
result0 = (NU32)0;
module0 = s0;
while (!((*module0).kind == ((Tsymkind292435) 6))) {
module0 = (*module0).owner;
}
package0 = (*module0).owner;
result0 = register_203121_1926258066((&gdebuginfo_203470_1926258066), (*(*package0).name).s, (*(*module0).name).s);
return result0;
}
/* Add one reference to a cell. Counts advance in steps of 8; the values
   below 8 apparently serve as GC flag space (see the `< 8` tests in the
   decref paths). */
static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0) {
(*c0).refcount = (NI)((NU64)((*c0).refcount) + (NU64)(((NI) 8)));
}
/* Drop one reference (a step of 8); when the count falls below 8 the
   cell has no owners left and is queued on the ZCT. */
static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0) {
{
(*c0).refcount -= ((NI) 8);
if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3;
rtladdzct_52601_1689653243(c0);
}
LA3: ;
}
/* Reference-counted assignment `*dest0 = src0` (potentially cyclic data).
   As with asgnRefNoCycle, the new referent is incref'd before the old one
   is decref'd so self-assignment cannot free the object prematurely. */
static N_INLINE(void, asgnRef)(void** dest0, void* src0) {
{
Cell47305* LOC5;
/* incref the incoming reference, unless nil */
if (!!((src0 == NIM_NIL))) goto LA3;
LOC5 = (Cell47305*)0;
LOC5 = usrtocell_51440_1689653243(src0);
incref_53419_1689653243(LOC5);
}
LA3: ;
{
Cell47305* LOC10;
/* decref the previously stored reference, unless nil */
if (!!(((*dest0) == NIM_NIL))) goto LA8;
LOC10 = (Cell47305*)0;
LOC10 = usrtocell_51440_1689653243((*dest0));
decref_53001_1689653243(LOC10);
}
LA8: ;
(*dest0) = src0;
}
/* Compute the option set for a module's init proc: the global options,
   with option bit 15 (32768 == 1<<15) cleared when the module symbol
   carries flag 13; otherwise the global options unchanged. */
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0) {
Toption169009Set result0;
memset((void*)(&result0), 0, sizeof(result0));
if ((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) {
result0 = (goptions_169128_2607990831 & ~ 32768);
} else {
result0 = goptions_169128_2607990831;
}
return result0;
}
/* Allocate a fresh proc context for the module's pre-init code.
   Its label counter starts at 100000, presumably so generated temporary
   names cannot collide with those of the regular init proc. */
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0) {
Tcproc529021* proc0;
proc0 = newproc_529206_3723162438(NIM_NIL, m0);
(*proc0).labels = ((NI) 100000);
return proc0;
}
/* Allocate a fresh proc context for the module's post-init code.
   Labels start at 200000 — a distinct range from the pre-init proc's
   100000 — keeping generated temporary names disjoint. */
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0) {
Tcproc529021* proc0;
proc0 = newproc_529206_3723162438(NIM_NIL, m0);
(*proc0).labels = ((NI) 200000);
return proc0;
}
/* Produce a fresh per-module temporary name: the module's `tmpbase` rope
   with the current label counter appended. The counter is bumped after
   use so each call yields a distinct name. */
N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*m0).labels)));
result0 = HEX26_178418_2381377266((*m0).tmpbase, LOC1);
(*m0).labels += ((NI) 1);
return result0;
}
/* Allocate and initialise a fresh code-gen module (BModule) for `module0`
   writing to `filename0`: builds the temp-name base from the owner hash,
   initialises the per-module caches/sets/tables, and creates the init,
   pre-init and post-init proc contexts. */
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0) {
Tcgen529027* result0;
NimStringDesc* LOC1;
NU32 LOC2;
NimStringDesc* LOC3;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
result0 = (Tcgen529027*)0;
result0 = (Tcgen529027*) newObj((&NTI529015), sizeof(Tcgen529027));
(*result0).Sup.Sup.m_type = (&NTI529027);
/* tmpbase = <prefix literal> & $hashOwner(module) & <suffix literal>;
   the literal contents (T839829468_11/12) are declared earlier in the file */
LOC1 = (NimStringDesc*)0;
LOC2 = (NU32)0;
LOC2 = hashowner_532977_839829468(module0);
LOC3 = (NimStringDesc*)0;
LOC3 = HEX24_8401_1689653243(((NU64) (LOC2)));
LOC1 = rawNewString(LOC3->Sup.len + 2);
appendString(LOC1, ((NimStringDesc*) &T839829468_11));
appendString(LOC1, LOC3);
appendString(LOC1, ((NimStringDesc*) &T839829468_12));
asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_178277_2381377266(LOC1));
initlinkedlist_147031_3771138726((&(*result0).headerfiles));
initintset_268885_2627731572((&(*result0).declaredthings));
initintset_268885_2627731572((&(*result0).declaredprotos));
/* copyStringRC1 hands back an owned copy; the previous value (always nil
   here, the object is fresh) must be unref'd manually */
LOC4 = (NimStringDesc*)0;
LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0);
if (LOC4) nimGCunrefNoCycle(LOC4);
LOC5 = (NimStringDesc*)0;
LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0);
if (LOC5) nimGCunrefNoCycle(LOC5);
initidtable_296019_850551059((&(*result0).typecache));
initidtable_296019_850551059((&(*result0).forwtypecache));
asgnRefNoCycle((void**) (&(*result0).module), module0);
initintset_268885_2627731572((&(*result0).typeinfomarker));
asgnRef((void**) (&(*result0).initproc), newproc_529206_3723162438(NIM_NIL, result0));
(*(*result0).initproc).options = initprocoptions_562635_839829468(result0);
asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_562625_839829468(result0));
asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_562630_839829468(result0));
initnodetable_296085_850551059((&(*result0).datacache));
(*result0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
(*result0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_533598_839829468(result0));
asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_533598_839829468(result0));
{
/* module symbol flag 13 set: mark the module (codegen flag 0) and clear
   option bit 15 in the pre/post init procs — same bit initprocoptions
   clears for the init proc */
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA8;
(*result0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
(*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
(*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
}
LA8: ;
return result0;
}
/* Convenience overload: derive the module's file name from its position
   in the global file table, then build the BModule for it. */
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0) {
NimStringDesc* path0;
path0 = tofullpath_192261_155036129(((NI32) ((*module0).position)));
return rawnewmodule_562663_839829468(module0, path0);
}
/* Create the BModule for `module0` and register it in the global
   gmodules sequence (growing the seq when the position is past its
   current length). Raises an internal error if a module is created
   twice, or — under global option 2 with symbol flag 25 set — reports
   the offending file name via literal T839829468_13. */
N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
Tcgen529027* LOC3;
NimStringDesc* LOC6;
/* a BModule must not already exist for this symbol */
LOC3 = (Tcgen529027*)0;
LOC3 = getcgenmodule_532226_839829468(module0);
if (!!((LOC3 == NIM_NIL))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_9);
internalerror_196113_155036129(LOC6);
}
LA4: ;
result0 = rawnewmodule_563038_839829468(module0);
{
/* grow gmodules so index `position` is valid */
if (!((gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9;
gmodules_529170_3723162438 = (TY529153*) setLengthSeq(&(gmodules_529170_3723162438)->Sup, sizeof(Tcgen529027*), ((NI) ((NI)((*module0).position + ((NI) 1)))));
}
LA9: ;
asgnRef((void**) (&gmodules_529170_3723162438->data[(*module0).position]), result0);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)) goto LA13;
{
NimStringDesc* LOC19;
NimStringDesc* LOC20;
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)) goto LA17;
/* message = <literal T839829468_13> & fileName(module) */
LOC19 = (NimStringDesc*)0;
LOC20 = (NimStringDesc*)0;
LOC20 = tofilename_192257_155036129(((NI32) ((*module0).position)));
LOC19 = rawNewString(LOC20->Sup.len + 28);
appendString(LOC19, ((NimStringDesc*) &T839829468_13));
appendString(LOC19, LOC20);
internalerror_196113_155036129(LOC19);
}
LA17: ;
}
LA13: ;
return result0;
}
/* Pass-open hook for the C code generator: create the BModule for
   `module0` and return it as the pass context. Additionally, when global
   option bit 27 is enabled (header generation, presumably --header —
   confirm) and no header module exists yet, create one: the path comes
   from `headerfile` if non-empty, else from the project's full path,
   run through completeCFilePath and an extension change (literal
   T839829468_14). The new header module gets codegen flag 3. */
N_NIMCALL(Tpasscontext341002*, myopen_563112_839829468)(Tsym292834* module0) {
Tpasscontext341002* result0;
Tcgen529027* LOC1;
result0 = (Tpasscontext341002*)0;
LOC1 = (Tcgen529027*)0;
LOC1 = newmodule_563044_839829468(module0);
result0 = &LOC1->Sup;
{
NIM_BOOL LOC4;
NimStringDesc* f0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
LOC4 = (NIM_BOOL)0;
LOC4 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 27))&63U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = (generatedheader_532201_839829468 == NIM_NIL);
LA5: ;
if (!LOC4) goto LA6;
{
/* pick an explicit --header file name when one was given */
if (!(((NI) 0) < (headerfile_169138_2607990831 ? headerfile_169138_2607990831->Sup.len : 0))) goto LA10;
f0 = headerfile_169138_2607990831;
}
goto LA8;
LA10: ;
{
f0 = gprojectfull_169211_2607990831;
}
LA8: ;
LOC13 = (NimStringDesc*)0;
LOC13 = completecfilepath_273854_2528170400(f0, NIM_TRUE);
LOC14 = (NimStringDesc*)0;
LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14));
asgnRef((void**) (&generatedheader_532201_839829468), rawnewmodule_562663_839829468(module0, LOC14));
(*generatedheader_532201_839829468).flags |= ((NU8)1)<<((((Codegenflag529025) 3))%(sizeof(NU8)*8));
}
LA6: ;
return result0;
}
/* Compute the output C-file path for module m0: choose a file extension
   (literals T839829468_15/16/17) based on the active command and the
   module symbol's flags 27/28, then run the module's cfilename through
   withPackageName, completeCFilePath and an extension change. */
N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0) {
NimStringDesc* ext0;
NimStringDesc* named0;
NimStringDesc* full0;
if ((gcmd_169132_2607990831 == ((Tcommands169076) 2)) ||
(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) {
ext0 = copyString(((NimStringDesc*) &T839829468_15));
} else if ((gcmd_169132_2607990831 == ((Tcommands169076) 3)) ||
(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0)) {
ext0 = copyString(((NimStringDesc*) &T839829468_16));
} else {
ext0 = copyString(((NimStringDesc*) &T839829468_17));
}
named0 = withpackagename_170073_2607990831((*m0).cfilename);
full0 = completecfilepath_273854_2528170400(named0, NIM_TRUE);
return noschangeFileExt(full0, ext0);
}
/* Pass-open hook for a cached module: build the BModule as usual, then
   merge the information recorded in the previously generated C file.
   The rod-reader argument is unused here (kept for the pass signature). */
N_NIMCALL(Tpasscontext341002*, myopencached_563246_839829468)(Tsym292834* module0, Trodreader332021* rd0) {
Tcgen529027* m0;
NimStringDesc* cfile0;
m0 = newmodule_563044_839829468(module0);
cfile0 = getcfile_563201_839829468(m0);
readmergeinfo_530613_2760143328(cfile0, m0);
return &m0->Sup;
}
/* Code generation is skipped as soon as any compile error has been
   reported; the node argument is unused. */
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0) {
return (((NI) 0) < gerrorcounter_192069_155036129);
}
/* Fill a code-gen location record, but only if it is still unset
   (kind == 0): store the kind, type and storage class, and the rope name
   r0 unless a name was already assigned. Never overwrites existing data. */
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0) {
{
if (!((*a0).k == ((Tlockind292808) 0))) goto LA3;
(*a0).k = k0;
unsureAsgnRef((void**) (&(*a0).t), typ0);
(*a0).s = s0;
{
if (!((*a0).r == NIM_NIL)) goto LA7;
unsureAsgnRef((void**) (&(*a0).r), r0);
}
LA7: ;
}
LA3: ;
}
/* True when the identifier's id falls into one of the ranges the
   compiler reserves for keywords: 200..262, 4..70, or exactly 138.
   (The original used GCC case-range labels for the same test.) */
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0) {
NI id0;
id0 = (*w0).Sup.id;
if ((((NI) 200) <= id0 && id0 <= ((NI) 262)) ||
(((NI) 4) <= id0 && id0 <= ((NI) 70)) ||
(id0 == ((NI) 138))) {
return NIM_TRUE;
}
return NIM_FALSE;
}
/* Compute (and cache in s->loc.r) the C identifier for symbol s0.
   If a name was already assigned it is returned unchanged. Otherwise the
   Nim identifier is mangled; when the symbol's kind is in a whitelist
   (bitmask 2824), none of the flags in mask 2149580812 are set, and the
   name is not a reserved keyword, the original name is kept with a short
   suffix (literal T839829468_18). Otherwise a unique suffix is appended:
   separator, symbol id, separator, owner hash (separators formatted via
   literal T839829468_12). */
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*s0).loc.r;
{
NIM_BOOL keeporigname0;
NIM_BOOL LOC5;
NIM_BOOL LOC6;
NIM_BOOL LOC9;
NimStringDesc* LOC10;
/* only compute a name once; loc.r acts as the cache */
if (!(result0 == NIM_NIL)) goto LA3;
LOC5 = (NIM_BOOL)0;
LOC6 = (NIM_BOOL)0;
LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0);
if (!(LOC6)) goto LA7;
LOC6 = ((IL64(2149580812) & (*s0).flags) == 0);
LA7: ;
LOC5 = LOC6;
if (!(LOC5)) goto LA8;
LOC9 = (NIM_BOOL)0;
LOC9 = iskeyword_532960_839829468((*s0).name);
LOC5 = !(LOC9);
LA8: ;
keeporigname0 = LOC5;
LOC10 = (NimStringDesc*)0;
LOC10 = mangle_528847_2036603609((*(*s0).name).s);
result0 = rope_178277_2381377266(LOC10);
{
if (!keeporigname0) goto LA13;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_18));
}
goto LA11;
LA13: ;
{
/* append "<sep><id><sep><ownerhash>" to guarantee uniqueness */
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
TY533289 LOC19;
Ropeobj178006* LOC20;
NU32 LOC21;
Ropeobj178006* LOC22;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0);
add_178482_2381377266(&result0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = rope_178401_2381377266(((NI64) ((*s0).Sup.id)));
add_178482_2381377266(&result0, LOC18);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ropeobj178006*)0;
LOC20 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0);
add_178482_2381377266(&result0, LOC20);
LOC21 = (NU32)0;
LOC21 = hashowner_532977_839829468(s0);
LOC22 = (Ropeobj178006*)0;
LOC22 = rope_178401_2381377266(((NI64) (LOC21)));
add_178482_2381377266(&result0, LOC22);
}
LA11: ;
asgnRefNoCycle((void**) (&(*s0).loc.r), result0);
}
LA3: ;
return result0;
}
/* Ensure the proc symbol has a code-gen location: when loc.k is still
   unset (0), mangle the name and fill the loc with kind 7 and storage
   class 2. Already-filled locations are left untouched. */
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0) {
if ((*sym0).loc.k == ((Tlockind292808) 0)) {
Ropeobj178006* mangled0;
mangled0 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 7), (*sym0).typ, mangled0, ((Tstorageloc292812) 2));
}
}
/* If the symbol carries loc flag 6 (header import), record the header
   path from its lib annex in the module's header-file list. The boolean
   returned by includestr (whether the entry was newly added) is
   deliberately discarded — a `discard` in the Nim source. */
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{
NimStringDesc* LOC5;
NIM_BOOL LOC6;
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 6))&15U)))!=0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = getstr_297230_850551059((*(*sym0).annex).path);
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5);
}
LA3: ;
}
/* Append one character to a Nim string in place and re-terminate with NUL.
   The caller must already have reserved capacity for len+1 characters plus
   the terminator (the Nim codegen emits this only after rawNewString with a
   sufficient size).  The "- 0" index offsets are codegen artifacts. */
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) {
(*dest0).data[((*dest0).Sup.len)- 0] = c0;
(*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0;
(*dest0).Sup.len += ((NI) 1);
}
/* A proc symbol counts as "activated" exactly when it has a resolved type. */
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0) {
	return ((*prc0).typ == NIM_NIL) ? NIM_FALSE : NIM_TRUE;
}
/* Append `prc0` to the module's forwardedprocs seq and bump the global
   forwarded-proc counter.  The grow/assign/increment order is the standard
   Nim seq-append sequence (incrSeqV2 reallocates, asgnRefNoCycle writes the
   GC-tracked slot, then len is advanced) -- do not reorder. */
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
(*m0).forwardedprocs = (Tsymseq292804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*));
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0);
++(*m0).forwardedprocs->Sup.len;
gforwardedprocscounter_529171_3723162438 += ((NI) 1);
}
/* Emit a C #line directive (format string T839829468_21) into rope *r0, but
   only when global option bit 10 (Toption169009 10 -- presumably
   --lineDir:on; confirm against options.nim) is enabled.  The filename is
   flattened to a single-line C string literal first. */
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0) {
{
TY532811 LOC5;
NimStringDesc* LOC6;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NimStringDesc*)0;
LOC6 = makesinglelinecstring_528835_2036603609(filename0);
LOC5[0] = rope_178277_2381377266(LOC6);
LOC5[1] = rope_178401_2381377266(((NI64) (line0)));
addf_179205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2);
}
LA3: ;
}
/* Widen the packed line-number field of a Tlineinfo to the native int. */
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0) {
	return ((NI) (info0.line));
}
/* Line number of `info0`, clamped so an unknown/negative value becomes 0. */
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0) {
	NI line0 = tolinenumber_192415_155036129(info0);
	if (line0 < ((NI) 0)) {
		line0 = ((NI) 0);
	}
	return line0;
}
/* Convenience overload: resolve the file index to a full path and the line
   number to a safe (non-negative) value, then delegate to the
   filename/line variant of genclinedir. */
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0) {
	NimStringDesc* path0 = tofullpath_192261_155036129(info0.fileindex);
	NI line0 = safelinenm_532721_839829468(info0);
	genclinedir_532725_839829468(r0, path0, line0);
}
/* Map a Nim set type to a C type category by its byte size: sizes 1/2/4/8
   map to the integer categories 4..7; anything larger falls back to the
   generic aggregate category 17 (an array-backed set). */
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0) {
	NI64 bytes0 = getsize_320135_3876443242(typ0);
	Tctypekind529007 kind0;
	switch (((NI) (bytes0))) {
	case ((NI) 1):
		kind0 = ((Tctypekind529007) 4);
		break;
	case ((NI) 2):
		kind0 = ((Tctypekind529007) 5);
		break;
	case ((NI) 4):
		kind0 = ((Tctypekind529007) 6);
		break;
	case ((NI) 8):
		kind0 = ((Tctypekind529007) 7);
		break;
	default:
		kind0 = ((Tctypekind529007) 17);
		break;
	}
	return kind0;
}
/* Map a Nim type kind (Ttypekind292244) to the C-level type category
   (Tctypekind529007) the code generator uses when emitting declarations.
   Recurses through transparent wrappers (distinct/range/var/etc. kinds that
   delegate to lastson) and raises an internal error (T839829468_25) for
   kinds it cannot map.  NOTE(review): the numeric enum values correspond to
   Nim's tyXxx/ctXxx enums; the per-case meanings are not visible in this
   generated file -- confirm against compiler/ccgtypes.nim before changing. */
N_NIMCALL(Tctypekind529007, maptype_533394_839829468)(Ttype292840* typ0) {
Tctypekind529007 result0;
result0 = (Tctypekind529007)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 0):
case ((Ttypekind292244) 7):
{
result0 = ((Tctypekind529007) 0);
}
break;
case ((Ttypekind292244) 1):
{
result0 = ((Tctypekind529007) 2);
}
break;
case ((Ttypekind292244) 2):
{
result0 = ((Tctypekind529007) 1);
}
break;
case ((Ttypekind292244) 19):
{
/* Sets are mapped separately, by byte size. */
result0 = mapsettype_533389_839829468(typ0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 17);
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
result0 = ((Tctypekind529007) 19);
}
break;
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 12):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
/* Transparent wrapper kinds: classify by the underlying (last son) type. */
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = lastson_295377_850551059(typ0);
result0 = maptype_533394_839829468(LOC8);
}
break;
case ((Ttypekind292244) 14):
{
/* Enum-like kind: signed if the first ordinal value is negative,
   otherwise sized by getsize (1/2/4/8 bytes). */
{
NI64 LOC12;
LOC12 = (NI64)0;
LOC12 = firstord_320001_3876443242(typ0);
if (!(LOC12 < IL64(0))) goto LA13;
result0 = ((Tctypekind529007) 6);
}
goto LA10;
LA13: ;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = getsize_320135_3876443242(typ0);
switch (((NI) (LOC16))) {
case ((NI) 1):
{
result0 = ((Tctypekind529007) 13);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind529007) 14);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind529007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind529007) 7);
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
}
LA10: ;
}
break;
case ((Ttypekind292244) 20):
{
result0 = maptype_533394_839829468((*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
case ((Ttypekind292244) 22):
{
/* Pointer-like kinds: pointers to array-ish bases get category 18,
   everything else the plain pointer category 20. */
Ttype292840* base0;
Ttype292840* LOC24;
LOC24 = (Ttype292840*)0;
LOC24 = lastson_295377_850551059(typ0);
base0 = skiptypes_296099_850551059(LOC24, IL64(211106232576256));
switch ((*base0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 18);
}
break;
default:
{
result0 = ((Tctypekind529007) 20);
}
break;
}
}
break;
case ((Ttypekind292244) 26):
{
result0 = ((Tctypekind529007) 20);
}
break;
case ((Ttypekind292244) 24):
{
result0 = ((Tctypekind529007) 22);
}
break;
case ((Ttypekind292244) 25):
{
/* Proc type: calling convention 8 (closure?) picks 23, else 19.
   NOTE(review): confirm which Tcallingconvention value 8 denotes. */
{
if (!!(((*typ0).callconv == ((Tcallingconvention292002) 8)))) goto LA32;
result0 = ((Tctypekind529007) 23);
}
goto LA30;
LA32: ;
{
result0 = ((Tctypekind529007) 19);
}
LA30: ;
}
break;
case ((Ttypekind292244) 28):
{
result0 = ((Tctypekind529007) 21);
}
break;
case ((Ttypekind292244) 29):
{
result0 = ((Tctypekind529007) 24);
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
/* Numeric kinds map linearly: kind 31..44 -> category 3..16. */
result0 = ((Tctypekind529007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3))));
}
break;
case ((Ttypekind292244) 59):
{
/* Static-like kind: only mappable when it has an instantiated node. */
{
Ttype292840* LOC43;
if (!!(((*typ0).n == NIM_NIL))) goto LA41;
LOC43 = (Ttype292840*)0;
LOC43 = lastson_295377_850551059(typ0);
result0 = maptype_533394_839829468(LOC43);
}
goto LA39;
LA41: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
LA39: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
return result0;
}
/* True iff the type has a symbol carrying symbol-flag bit 27 (the
   imported-C++ marker).  A type without a symbol is never imported. */
N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0) {
	if ((*t0).sym == NIM_NIL) {
		return NIM_FALSE;
	}
	return (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
}
/* A type needs a "complex" (GC-aware) assignment exactly when it contains
   garbage-collected references anywhere inside it. */
N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0) {
	return containsgarbagecollectedref_320117_3876443242(typ0);
}
/* An object type "lacks a type field" (no embedded RTTI header) when it is
   an object kind (17) AND either: it is final (type-flag bit 2) with no base
   type (sons[0] == nil), or it is a pure object.  Non-object kinds return
   false.  Short-circuit structure rendered via gotos by the codegen. */
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC3;
NIM_BOOL LOC4;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = ((*typ0).kind == ((Ttypekind292244) 17));
if (!(LOC1)) goto LA2;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL);
LA5: ;
LOC3 = LOC4;
if (LOC3) goto LA6;
LOC3 = ispureobject_320138_3876443242(typ0);
LA6: ;
LOC1 = LOC3;
LA2: ;
result0 = LOC1;
return result0;
}
/* Decide whether a return type cannot be returned by value from a generated
   C function (so the codegen must use an out-parameter instead):
   - nil return type  -> invalid (true);
   - C category 17 (array-like): invalid unless the skipped type's kind is in
     the bitmask 14680064 (kinds 21..23, i.e. pointer-like);
   - C category 19 (struct-like): valid if imported from C++; otherwise
     invalid when it needs complex assignment, or is an object (kind 17)
     that carries a type field;
   - everything else -> valid (false). */
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!(rettype0 == NIM_NIL)) goto LA3;
result0 = NIM_TRUE;
}
goto LA1;
LA3: ;
{
Tctypekind529007 LOC6;
LOC6 = (Tctypekind529007)0;
LOC6 = maptype_533394_839829468(rettype0);
switch (LOC6) {
case ((Tctypekind529007) 17):
{
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
/* 14680064 == bits 21|22|23 of the kind enum. */
result0 = !(((14680064 &((NU64)1<<((NU)((*LOC8).kind)&63U)))!=0));
}
break;
case ((Tctypekind529007) 19):
{
Ttype292840* t0;
NIM_BOOL LOC16;
NIM_BOOL LOC18;
NIM_BOOL LOC20;
t0 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
{
/* Imported C++ types are returned by value regardless. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedcpptype_533478_839829468(rettype0);
if (LOC12) goto LA13;
LOC12 = isimportedcpptype_533478_839829468(t0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA14: ;
LOC16 = (NIM_BOOL)0;
LOC16 = needscomplexassignment_533511_839829468(t0);
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC18)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = isobjlackingtypefield_533515_839829468(t0);
LOC18 = !(LOC20);
LA19: ;
LOC16 = LOC18;
LA17: ;
result0 = LOC16;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Produce a base identifier rope for a type: the mangled symbol name when
   the type has a symbol, otherwise the anonymous-type placeholder string
   T839829468_28 (formatted with zero arguments). */
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NimStringDesc* LOC5;
if (!!(((*typ0).sym == NIM_NIL))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = mangle_528847_2036603609((*(*(*typ0).sym).name).s);
result0 = rope_178277_2381377266(LOC5);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0);
}
LA1: ;
return result0;
}
/* Return (creating and caching on first use) the C identifier rope for a
   type.  If the type's symbol carries one of the flag bits in mask 96
   (presumably importc/exportc -- confirm against ast.nim), the symbol's own
   loc name is used verbatim; otherwise a name of the form
   <mangled>_<type id> is built once and cached in typ.loc.r.  A nil result
   at the end is an internal error (message prefix T839829468_29 plus the
   enum name of the type kind). */
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*typ0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*typ0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*(*typ0).sym).loc.r;
}
goto LA1;
LA5: ;
{
{
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
/* Build and cache the generated name only once. */
if (!((*typ0).loc.r == NIM_NIL)) goto LA10;
LOC12 = (Ropeobj178006*)0;
LOC12 = typename_533292_839829468(typ0);
LOC13 = (Ropeobj178006*)0;
LOC13 = rope_178401_2381377266(((NI64) ((*typ0).Sup.id)));
asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_178418_2381377266(LOC12, LOC13));
}
LA10: ;
result0 = (*typ0).loc.r;
}
LA1: ;
{
NimStringDesc* LOC18;
if (!(result0 == NIM_NIL)) goto LA16;
LOC18 = (NimStringDesc*)0;
LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC18, ((NimStringDesc*) &T839829468_29));
appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC18);
}
LA16: ;
return result0;
}
/* Use the type's cached C name when it has a non-magic symbol marked with
   symbol-flag bit 5 (presumably importc -- confirm), otherwise fall back to
   the supplied literal C type text. */
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = !(((*t0).sym == NIM_NIL));
if (!(LOC4)) goto LA5;
LOC4 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
LA5: ;
LOC3 = LOC4;
if (!(LOC3)) goto LA6;
LOC3 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA6: ;
if (!LOC3) goto LA7;
result0 = gettypename_533313_839829468(t0);
}
goto LA1;
LA7: ;
{
result0 = rope_178277_2381377266(literal0);
}
LA1: ;
return result0;
}
/* Return the C type text for "simple" type kinds that map directly to a
   literal (bool, char, string, numeric kinds 31..44 via the
   Numericaltypetostr table, etc.), recursing through wrapper kinds.
   Returns nil for anything that needs a full descriptor, signalling the
   caller (gettypepre) to take the slow path.  Kind 28 additionally forces
   cgsym of a runtime symbol (T839829468_31) before emitting its literal. */
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 26):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_30));
}
break;
case ((Ttypekind292244) 28):
{
Ropeobj178006* LOC3;
LOC3 = (Ropeobj178006*)0;
LOC3 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_31));
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_32));
}
break;
case ((Ttypekind292244) 29):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_33));
}
break;
case ((Ttypekind292244) 1):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_34));
}
break;
case ((Ttypekind292244) 2):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_35));
}
break;
case ((Ttypekind292244) 5):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_18));
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
result0 = typenameorliteral_533898_839829468(typ0, Numericaltypetostr_533941_839829468[((*typ0).kind)- 31]);
}
break;
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 15):
{
/* Wrapper kinds: classify by the first son. */
result0 = getsimpletypedesc_533936_839829468(m0, (*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC15;
if (!!(((*typ0).n == NIM_NIL))) goto LA13;
LOC15 = (Ttype292840*)0;
LOC15 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC15);
}
goto LA11;
LA13: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_50));
}
LA11: ;
}
break;
case ((Ttypekind292244) 11):
{
Ttype292840* LOC18;
LOC18 = (Ttype292840*)0;
LOC18 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC18);
}
break;
default:
{
/* Not a simple type: caller must build/look up a full descriptor. */
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Look up a previously emitted type-descriptor rope in an id table, keyed
   by the type's embedded id object; returns nil when absent. */
N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0) {
	Tidobj199004* id0 = &key0->Sup;
	TNimObject* hit0 = idtableget_299086_2984716966(tab0, id0);
	return ((Ropeobj178006*) (hit0));
}
/* Fast path of type-descriptor lookup: nil type maps to the literal
   T839829468_26 (presumably "void" -- confirm); otherwise try the simple
   literal mapping, then the module's descriptor cache.  May return nil,
   meaning the descriptor still has to be generated. */
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(typ0 == NIM_NIL)) goto LA3;
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_26));
}
goto LA1;
LA3: ;
{
result0 = getsimpletypedesc_533936_839829468(m0, typ0);
{
if (!(result0 == NIM_NIL)) goto LA8;
result0 = cachegettype_533593_839829468((*m0).typecache, typ0);
}
LA8: ;
}
LA1: ;
return result0;
}
/* True iff the type has a symbol carrying symbol-flag bit 5 (the imported
   marker).  A type with no symbol is never imported. */
N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0) {
	if ((*t0).sym == NIM_NIL) {
		return NIM_FALSE;
	}
	return (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
}
/* Pick the format string used for forward struct declarations: variant
   T839829468_54 when compiling in command mode 2 or when the module itself
   is flagged as C++ (symbol-flag bit 27), variant T839829468_55 otherwise.
   Returns a fresh copy of the chosen literal. */
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0) {
NimStringDesc* result0;
result0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
result0 = copyString(((NimStringDesc*) &T839829468_54));
}
goto LA1;
LA5: ;
{
result0 = copyString(((NimStringDesc*) &T839829468_55));
}
LA1: ;
return result0;
}
/* Choose the C aggregate keyword for a record type: literal T839829468_56
   when type-flag bit 1 is set (the union flag, presumably), otherwise
   literal T839829468_57; returned as a rope. */
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0) {
	NimStringDesc* keyword0;
	if (((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 1))&31U)))!=0) {
		keyword0 = ((NimStringDesc*) &T839829468_56);
	} else {
		keyword0 = ((NimStringDesc*) &T839829468_57);
	}
	return rope_178277_2381377266(keyword0);
}
/* Return the C name for a type, emitting a forward declaration on first
   use.  Order: forward-declaration cache, then the simple/pre lookup; on a
   miss, object/tuple/seq-like kinds (24, 18, 17) get their name, a forward
   struct declaration appended to file section 2 (unless imported), and a
   cache entry.  Any other kind here is an internal error (prefix
   T839829468_58 plus the kind's enum name and a ')'). */
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
{ result0 = (Ropeobj178006*)0;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA3;
goto BeforeRet;
}
LA3: ;
result0 = gettypepre_533972_839829468(m0, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA7;
goto BeforeRet;
}
LA7: ;
switch ((*typ0).kind) {
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
{
Tidobj199004* LOC17;
TNimObject* LOC18;
result0 = gettypename_533313_839829468(typ0);
{
NIM_BOOL LOC12;
NimStringDesc* LOC15;
TY532811 LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedtype_533451_839829468(typ0);
if (!!(LOC12)) goto LA13;
/* Non-imported: emit "struct/union <name>;" style forward decl. */
LOC15 = (NimStringDesc*)0;
LOC15 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = structorunion_534001_839829468(typ0);
LOC16[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC15, LOC16, 2);
}
LA13: ;
LOC17 = (Tidobj199004*)0;
LOC17 = &typ0->Sup;
LOC18 = (TNimObject*)0;
LOC18 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC17, LOC18);
}
break;
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC20, ((NimStringDesc*) &T839829468_58));
appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI292244)));
appendChar(LOC20, 41);
internalerror_196113_155036129(LOC20);
}
break;
}
}BeforeRet: ;
return result0;
}
/* Push a type onto the module's pending-type stack (standard Nim seq append:
   grow, GC-safe slot write, then advance len -- keep this order). */
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
(*m0).typestack = (Ttypeseq292836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0);
++(*m0).typestack->Sup.len;
}
/* "Weak" type descriptor: where a forward declaration suffices (object/tuple
   kinds 17/18, and seq-like kind 24 which gets a "*" suffix via
   T839829468_53), emit only the forward name and defer the full definition
   by pushing the unique type onto the type stack.  Imported C++ types used
   through kind 11 still need the full descriptor, as does every other kind
   (both delegate to gettypedescaux). */
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* etb0;
result0 = (Ropeobj178006*)0;
etb0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = isimportedcpptype_533478_839829468(etb0);
if (!(LOC4)) goto LA5;
LOC4 = ((*t0).kind == ((Ttypekind292244) 11));
LA5: ;
if (!LOC4) goto LA6;
result0 = gettypedescaux_533505_839829468(m0, t0, check0);
}
goto LA2;
LA6: ;
{
Ttype292840* x0;
x0 = getuniquetype_528640_2036603609(etb0);
result0 = gettypeforward_534039_839829468(m0, x0);
pushtype_533958_839829468(m0, x0);
}
LA2: ;
}
break;
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* LOC10;
x0 = getuniquetype_528640_2036603609(etb0);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53));
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
result0 = gettypedescaux_533505_839829468(m0, t0, check0);
}
break;
}
return result0;
}
/* Number of sons of an AST node; 0 when the sons seq is nil.  Note the
   condition parses as `(!sons) == 0`, i.e. "sons != NULL" -- correct, but
   relying on `!` binding tighter than `==` (codegen artifact).  The LA3
   branch re-checks for nil, which is redundant but harmless. */
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0) {
NI result0;
result0 = (NI)0;
{
if (!(*n0).kindU.S6.sons == 0) goto LA3;
result0 = ((NI) 0);
}
goto LA1;
LA3: ;
{
result0 = ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0);
}
LA1: ;
return result0;
}
/* Format `args0` with the codegen rope formatter and append the resulting
   rope to *c0. */
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006* formatted0 = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
	add_178482_2381377266(c0, formatted0);
}
/* Scan a C++ importcpp pattern slot of the form '<stars><digit>' starting
   just past *cursor0: skips any run of '*' (42), then requires a single
   decimal digit.  On success stores the digit's value in *outidx0, the
   star count in *outstars0, advances *cursor0 past the digit, and returns
   true; on a non-digit returns false (cursor left after the stars). */
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) {
NIM_BOOL result0;
NI begin0;
{ result0 = (NIM_BOOL)0;
(*cursor0) += ((NI) 1);
begin0 = (*cursor0);
{
while (1) {
if (!((NU8)(pat0->data[(*cursor0)]) == (NU8)(42))) goto LA2;
(*cursor0) += ((NI) 1);
} LA2: ;
}
{
if (!(((NU8)(pat0->data[(*cursor0)])) >= ((NU8)(48)) && ((NU8)(pat0->data[(*cursor0)])) <= ((NU8)(57)))) goto LA5;
(*outidx0) = ((NI) ((NI)(((NI) (((NU8)(pat0->data[(*cursor0)])))) - ((NI) 48))));
(*outstars0) = (NI)((*cursor0) - begin0);
(*cursor0) += ((NI) 1);
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA3;
LA5: ;
{
result0 = NIM_FALSE;
goto BeforeRet;
}
LA3: ;
}BeforeRet: ;
return result0;
}
/* Resolve an importcpp pattern slot: take son `idx0` of `typ0` (internal
   error T839829468_81 if out of range), then dereference it `stars0` times.
   Each '*' step takes sons[1] for kind-11 types and elemtype otherwise;
   steps on a nil or son-less type are silently skipped. */
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0) {
Ttype292840* result0;
result0 = (Ttype292840*)0;
{
NI LOC3;
LOC3 = (NI)0;
LOC3 = len_295339_850551059(typ0);
if (!(LOC3 <= idx0)) goto LA4;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_81));
}
LA4: ;
result0 = (*typ0).sons->data[idx0];
{
NI i_534906_839829468;
NI res_534931_839829468;
i_534906_839829468 = (NI)0;
res_534931_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534931_839829468 <= stars0)) goto LA8;
i_534906_839829468 = res_534931_839829468;
{
NIM_BOOL LOC11;
NI LOC13;
LOC11 = (NIM_BOOL)0;
LOC11 = !((result0 == NIM_NIL));
if (!(LOC11)) goto LA12;
LOC13 = (NI)0;
LOC13 = len_295339_850551059(result0);
LOC11 = (((NI) 0) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
{
if (!((*result0).kind == ((Ttypekind292244) 11))) goto LA18;
result0 = (*result0).sons->data[((NI) 1)];
}
goto LA16;
LA18: ;
{
result0 = elemtype_320394_3876443242(result0);
}
LA16: ;
}
LA14: ;
res_534931_839829468 += ((NI) 1);
} LA8: ;
}
}
return result0;
}
/* Mangle a record field name for C.  If the identifier collides with a
   C/C++ keyword, the first character is upper-cased to disambiguate
   (mangle's output is assumed non-empty here). */
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0) {
NimStringDesc* result0;
result0 = (NimStringDesc*)0;
result0 = mangle_528847_2036603609((*name0).s);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = iskeyword_532960_839829468(name0);
if (!LOC3) goto LA4;
result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]);
}
LA4: ;
return result0;
}
/* C name for a record field: when the record's symbol has a flag in mask 96
   (presumably importc/exportc -- confirm), use the field's pre-filled loc
   name verbatim; otherwise mangle the field identifier.  A nil result is an
   internal error reported at the field's source position (T839829468_96). */
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*rectype0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*rectype0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*field0).loc.r;
}
goto LA1;
LA5: ;
{
NimStringDesc* LOC8;
LOC8 = (NimStringDesc*)0;
LOC8 = manglefield_532973_839829468((*field0).name);
result0 = rope_178277_2381377266(LOC8);
}
LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA11;
internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96));
}
LA11: ;
return result0;
}
/* Recursively emit C field declarations for a record AST node `n0`:
   - node kind 138 (record list): concatenate the declarations of each son;
   - node kind 139 (record case / variant): emit the discriminator (son 0),
     then an anonymous struct per branch collected into a union named after
     the discriminator (mangled name + 'U'); `accessexpr0` tracks the dotted
     access path used in field loc fill-in;
   - node kind 3 (a single field symbol): skip void-typed fields (kind 62),
     mangle the name, fill the field's loc with the access expression, and
     emit the declaration -- with special forms for unchecked arrays
     (elem-type + name via T839829468_97), seq-like kind 24 (weak
     descriptor), and bitfields (bitsize via T839829468_98).
   Returns the accumulated rope (possibly nil for empty records).  Any other
   node kind is an internal error.  NOTE(review): kind numbers correspond to
   Nim's nkRecList/nkRecCase/nkSym -- confirm against ast.nim before edits;
   the goto topology and fillloc side effects make this order-sensitive. */
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0) {
Ropeobj178006* result0;
Ropeobj178006* ae0;
Ropeobj178006* uname0;
Ropeobj178006* sname0;
Ropeobj178006* a0;
Tnode292802* k0;
Tsym292834* field0;
{ result0 = (Ropeobj178006*)0;
ae0 = (Ropeobj178006*)0;
uname0 = (Ropeobj178006*)0;
sname0 = (Ropeobj178006*)0;
a0 = (Ropeobj178006*)0;
k0 = (Tnode292802*)0;
field0 = (Tsym292834*)0;
result0 = NIM_NIL;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
/* Record list: emit every son in order. */
{
NI i_534447_839829468;
NI HEX3Atmp_534620_839829468;
NI LOC3;
NI res_534623_839829468;
i_534447_839829468 = (NI)0;
HEX3Atmp_534620_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295351_850551059(n0);
HEX3Atmp_534620_839829468 = (NI)(LOC3 - ((NI) 1));
res_534623_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC6;
if (!(res_534623_839829468 <= HEX3Atmp_534620_839829468)) goto LA5;
i_534447_839829468 = res_534623_839829468;
LOC6 = (Ropeobj178006*)0;
LOC6 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[i_534447_839829468], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC6);
res_534623_839829468 += ((NI) 1);
} LA5: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
/* Record case: son 0 must be the discriminator field symbol. */
Ropeobj178006* LOC12;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
Ropeobj178006* unionbody0;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA10;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89));
}
LA10: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC12);
/* Union name: mangled discriminator name + 'U' (85). */
LOC13 = (NimStringDesc*)0;
LOC14 = (NimStringDesc*)0;
LOC14 = mangle_528847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
LOC13 = rawNewString(LOC14->Sup.len + 1);
appendString(LOC13, LOC14);
appendChar(LOC13, 85);
uname0 = rope_178277_2381377266(LOC13);
{
TY532811 LOC19;
if (!!((accessexpr0 == NIM_NIL))) goto LA17;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = accessexpr0;
LOC19[1] = uname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2);
}
goto LA15;
LA17: ;
{
ae0 = uname0;
}
LA15: ;
unionbody0 = NIM_NIL;
{
NI i_534491_839829468;
NI HEX3Atmp_534629_839829468;
NI LOC22;
NI res_534632_839829468;
i_534491_839829468 = (NI)0;
HEX3Atmp_534629_839829468 = (NI)0;
LOC22 = (NI)0;
LOC22 = sonslen_295351_850551059(n0);
HEX3Atmp_534629_839829468 = (NI)(LOC22 - ((NI) 1));
res_534632_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534632_839829468 <= HEX3Atmp_534629_839829468)) goto LA24;
i_534491_839829468 = res_534632_839829468;
switch ((*(*n0).kindU.S6.sons->data[i_534491_839829468]).kind) {
case ((Tnodekind292020) 85):
case ((Tnodekind292020) 88):
{
/* Branch (of-branch/else): its last son is the branch body. */
k0 = lastson_295364_850551059((*n0).kindU.S6.sons->data[i_534491_839829468]);
{
Ropeobj178006* LOC30;
TY532811 LOC31;
Ropeobj178006* LOC32;
if (!!(((*k0).kind == ((Tnodekind292020) 3)))) goto LA28;
/* Non-symbol body: wrap the branch fields in a named sub-struct
   ("S<i>", prefix T839829468_91). */
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) (i_534491_839829468)));
sname0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_91), LOC30);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = ae0;
LOC31[1] = sname0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2);
a0 = genrecordfieldsaux_534421_839829468(m0, k0, LOC32, rectype0, check0);
{
TY178507 LOC37;
if (!!((a0 == NIM_NIL))) goto LA35;
add_178487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92));
add_178482_2381377266(&unionbody0, a0);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = sname0;
addf_179205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1);
}
LA35: ;
}
goto LA26;
LA28: ;
{
/* Single-field branch: emit directly into the union body. */
Ropeobj178006* LOC39;
LOC39 = (Ropeobj178006*)0;
LOC39 = genrecordfieldsaux_534421_839829468(m0, k0, ae0, rectype0, check0);
add_178482_2381377266(&unionbody0, LOC39);
}
LA26: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_94));
}
break;
}
res_534632_839829468 += ((NI) 1);
} LA24: ;
}
}
{
TY532811 LOC45;
if (!!((unionbody0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = unionbody0;
LOC45[1] = uname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2);
}
LA43: ;
}
break;
case ((Tnodekind292020) 3):
{
/* Single field symbol. */
field0 = (*n0).kindU.S4.sym;
{
/* void-typed fields (kind 62) produce no C declaration. */
if (!((*(*field0).typ).kind == ((Ttypekind292244) 62))) goto LA49;
goto BeforeRet;
}
LA49: ;
sname0 = manglerecfieldname_534361_839829468(field0, rectype0);
{
TY532811 LOC55;
if (!!((accessexpr0 == NIM_NIL))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = accessexpr0;
LOC55[1] = sname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2);
}
goto LA51;
LA53: ;
{
ae0 = sname0;
}
LA51: ;
/* Record how this field is accessed from C for later expression gen. */
fillloc_532282_839829468((&(*field0).loc), ((Tlockind292808) 5), (*field0).typ, ae0, ((Tstorageloc292812) 0));
{
NIM_BOOL LOC59;
Ttype292840* fieldtype0;
LOC59 = (NIM_BOOL)0;
LOC59 = isimportedcpptype_533478_839829468(rectype0);
if (!!(LOC59)) goto LA60;
fieldtype0 = skiptypes_296099_850551059((*field0).loc.t, IL64(211106232576256));
{
NIM_BOOL LOC64;
TY532811 LOC68;
Ttype292840* LOC69;
LOC64 = (NIM_BOOL)0;
LOC64 = ((*fieldtype0).kind == ((Ttypekind292244) 16));
if (!(LOC64)) goto LA65;
LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0);
LA65: ;
if (!LOC64) goto LA66;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC69 = (Ttype292840*)0;
LOC69 = elemtype_320394_3876443242(fieldtype0);
LOC68[0] = gettypedescaux_533505_839829468(m0, LOC69, check0);
LOC68[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2);
}
goto LA62;
LA66: ;
{
TY532811 LOC73;
if (!((*fieldtype0).kind == ((Ttypekind292244) 24))) goto LA71;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = gettypedescweak_534079_839829468(m0, (*field0).loc.t, check0);
LOC73[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2);
}
goto LA62;
LA71: ;
{
TY535238 LOC77;
NimStringDesc* LOC78;
if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75;
memset((void*)LOC77, 0, sizeof(LOC77));
LOC77[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0);
LOC77[1] = sname0;
LOC78 = (NimStringDesc*)0;
LOC78 = nimIntToStr((*field0).kindU.S4.bitsize);
LOC77[2] = rope_178277_2381377266(LOC78);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3);
}
goto LA62;
LA75: ;
{
TY532811 LOC80;
memset((void*)LOC80, 0, sizeof(LOC80));
LOC80[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0);
LOC80[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2);
}
LA62: ;
}
LA60: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99));
}
break;
}
}BeforeRet: ;
return result0;
}
/* Emit all field declarations of a record type, starting from its record
   node with no access-path prefix. */
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
	return genrecordfieldsaux_534421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0);
}
/* Build the full C struct/union definition for a record type:
   header via the compiler-specific structure format (Field18) with an
   optional packing attribute (Field19, when type-flag bit 21 is set), then
   for objects (kind 17) either the RTTI/type-field preamble, an inheritance
   preamble embedding the base type (C++ mode uses format T839829468_87,
   plain C uses _88), or the final-object opener; tuples take the plain
   opener (T839829468_85).  The fields follow; an entirely empty struct gets
   the dummy-member filler T839829468_100 (empty structs are invalid C).
   Closes with "};" + newline (T839829468_101 + tnl).  `hasfield0` tracks
   whether a preamble already contributed a member. */
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
NIM_BOOL hasfield0;
Ropeobj178006* attribute0;
TY535238 LOC6;
Ropeobj178006* desc0;
NimStringDesc* LOC46;
result0 = (Ropeobj178006*)0;
hasfield0 = NIM_FALSE;
{
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 21))&31U)))!=0)) goto LA3;
attribute0 = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field19);
}
goto LA1;
LA3: ;
{
attribute0 = NIM_NIL;
}
LA1: ;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = structorunion_534001_839829468(typ0);
LOC6[1] = name0;
LOC6[2] = attribute0;
result0 = ropecg_532407_839829468(m0, Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field18, LOC6, 3);
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA9;
{
/* Object with no base type (sons[0] == nil). */
if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
TY533289 LOC23;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = !(((*typ0).sym == NIM_NIL));
if (!(LOC18)) goto LA19;
LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (LOC17) goto LA20;
LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0);
}
goto LA15;
LA21: ;
{
/* Root object with RTTI: embed the type field (counts as a member). */
TY532811 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = name0;
LOC25[1] = attribute0;
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2);
hasfield0 = NIM_TRUE;
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* Inheriting object, C++/cpp-module path: base embedded via _87. */
NIM_BOOL LOC27;
TY178507 LOC31;
Ttype292840* LOC32;
LOC27 = (NIM_BOOL)0;
LOC27 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC27) goto LA28;
LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ttype292840*)0;
LOC32 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC31[0] = gettypedescaux_533505_839829468(m0, LOC32, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1);
hasfield0 = NIM_TRUE;
}
goto LA11;
LA29: ;
{
/* Inheriting object, plain C path: base embedded as first member (_88). */
TY178507 LOC34;
Ttype292840* LOC35;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC35 = (Ttype292840*)0;
LOC35 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC34[0] = gettypedescaux_533505_839829468(m0, LOC35, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1);
hasfield0 = NIM_TRUE;
}
LA11: ;
}
goto LA7;
LA9: ;
{
/* Non-object record (tuple): plain opener. */
TY178507 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = name0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1);
}
LA7: ;
desc0 = getrecordfields_534636_839829468(m0, typ0, check0);
{
NIM_BOOL LOC40;
TY533289 LOC44;
LOC40 = (NIM_BOOL)0;
LOC40 = (desc0 == NIM_NIL);
if (!(LOC40)) goto LA41;
LOC40 = !(hasfield0);
LA41: ;
if (!LOC40) goto LA42;
memset((void*)LOC44, 0, sizeof(LOC44));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0);
}
goto LA38;
LA42: ;
{
add_178482_2381377266(&result0, desc0);
}
LA38: ;
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC46, ((NimStringDesc*) &T839829468_101));
appendString(LOC46, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC46);
return result0;
}
/* Build the C struct definition for a tuple type: header via format
   T839829468_102, one "Field<i>" member per son (format T839829468_103,
   positional names), the dummy filler T839829468_104 for an empty tuple
   (empty structs are invalid C), and the "};" + newline trailer
   (T839829468_101 + tnl). */
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
TY532811 LOC1;
Ropeobj178006* desc0;
NimStringDesc* LOC13;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = structorunion_534001_839829468(typ0);
LOC1[1] = name0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2);
desc0 = NIM_NIL;
{
NI i_534799_839829468;
NI HEX3Atmp_534820_839829468;
NI LOC3;
NI res_534823_839829468;
i_534799_839829468 = (NI)0;
HEX3Atmp_534820_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
HEX3Atmp_534820_839829468 = (NI)(LOC3 - ((NI) 1));
res_534823_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC6;
if (!(res_534823_839829468 <= HEX3Atmp_534820_839829468)) goto LA5;
i_534799_839829468 = res_534823_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_534799_839829468], check0);
LOC6[1] = rope_178401_2381377266(((NI64) (i_534799_839829468)));
addf_179205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2);
res_534823_839829468 += ((NI) 1);
} LA5: ;
}
}
{
NimStringDesc* LOC11;
if (!(desc0 == NIM_NIL)) goto LA9;
LOC11 = (NimStringDesc*)0;
LOC11 = rawNewString(tnl_176644_4151366050->Sup.len + 11);
appendString(LOC11, ((NimStringDesc*) &T839829468_104));
appendString(LOC11, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC11);
}
goto LA7;
LA9: ;
{
add_178482_2381377266(&result0, desc0);
}
LA7: ;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC13, ((NimStringDesc*) &T839829468_101));
appendString(LOC13, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC13);
return result0;
}
/* Machine-generated by the Nim compiler (ccgtypes.getTypeDescAux).
 * Maps a Nim type to its C type name rope, emitting any required C
 * declarations (structs, typedefs, forward decls) into the module's
 * sections m0->s[...], and caching results in m0->typecache /
 * m0->forwtypecache.  `check0` is the in-progress set of type ids used
 * to detect recursive types.  The numeric Ttypekind292244 case labels
 * correspond to Nim's TTypeKind enum values (exact mapping inferred from
 * behavior below — TODO confirm against the matching compiler version).
 * Generated code: keep byte-identical; regenerate rather than hand-edit. */
N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* t_534942_839829468;
{ result0 = (Ropeobj178006*)0;
t_534942_839829468 = getuniquetype_528640_2036603609(typ0);
{
if (!(t_534942_839829468 == NIM_NIL)) goto LA3;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_27));
}
LA3: ;
{
/* If the type has a symbol, pull in the header its C name lives in. */
if (!!(((*t_534942_839829468).sym == NIM_NIL))) goto LA7;
useheader_532369_839829468(m0, (*t_534942_839829468).sym);
}
LA7: ;
/* Fast path: already-known/primitive type name. */
result0 = gettypepre_533972_839829468(m0, t_534942_839829468);
{
if (!!((result0 == NIM_NIL))) goto LA11;
goto BeforeRet;
}
LA11: ;
{
/* Recursion guard: the type id was already in `check0`.  That is only
 * legal for imported C++ types; otherwise it is an internal error
 * ("illegal recursion in type", presumably — message is T839829468_51). */
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id);
if (!LOC15) goto LA16;
{
NIM_BOOL LOC20;
NimStringDesc* LOC24;
NimStringDesc* LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = isimportedcpptype_533478_839829468(typ0);
if (LOC20) goto LA21;
LOC20 = isimportedcpptype_533478_839829468(t_534942_839829468);
LA21: ;
if (!!(LOC20)) goto LA22;
LOC24 = (NimStringDesc*)0;
LOC25 = (NimStringDesc*)0;
LOC25 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC24 = rawNewString(LOC25->Sup.len + 28);
appendString(LOC24, ((NimStringDesc*) &T839829468_51));
appendString(LOC24, LOC25);
internalerror_196113_155036129(LOC24);
}
LA22: ;
}
LA16: ;
switch ((*t_534942_839829468).kind) {
/* Pointer-like kinds (ref/ptr/var, presumably): element C type + "*". */
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
{
NimStringDesc* star0;
Ttype292840* et0;
Ttype292840* LOC38;
Ttype292840* etb0;
{
/* Under --cmd:compileToCpp (or a module flag), a non-byref var param
 * is emitted as a C++ reference instead of a pointer. */
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC33;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*t_534942_839829468).kind == ((Ttypekind292244) 23));
if (!(LOC30)) goto LA31;
LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC33) goto LA34;
LOC33 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA34: ;
LOC29 = LOC33;
LA32: ;
if (!LOC29) goto LA35;
star0 = copyString(((NimStringDesc*) &T839829468_52));
}
goto LA27;
LA35: ;
{
star0 = copyString(((NimStringDesc*) &T839829468_53));
}
LA27: ;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
et0 = lastson_295377_850551059(LOC38);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
{
/* Pointer to open-array-like element: collapse one level and force
 * a plain '*' (overwrites the first char of star0). */
if (!((IL64(281475110993936) &((NU64)1<<((NU)((*etb0).kind)&63U)))!=0)) goto LA41;
et0 = elemtype_320394_3876443242(etb0);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
star0->data[((NI) 0)] = 42;
}
LA41: ;
switch ((*etb0).kind) {
/* Element is an object/tuple: use a forward declaration so mutually
 * recursive records work; push the type for later full emission. */
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC46;
Ropeobj178006* LOC50;
LOC46 = (NIM_BOOL)0;
LOC46 = isimportedcpptype_533478_839829468(etb0);
if (!(LOC46)) goto LA47;
LOC46 = ((*et0).kind == ((Ttypekind292244) 11));
LA47: ;
if (!LOC46) goto LA48;
LOC50 = (Ropeobj178006*)0;
LOC50 = gettypedescaux_533505_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC50, star0);
}
goto LA44;
LA48: ;
{
Ttype292840* x0;
Ropeobj178006* name0;
Tidobj199004* LOC52;
TNimObject* LOC53;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(name0, star0);
LOC52 = (Tidobj199004*)0;
LOC52 = &t_534942_839829468->Sup;
LOC53 = (TNimObject*)0;
LOC53 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC52, LOC53);
pushtype_533958_839829468(m0, x0);
}
LA44: ;
}
break;
/* Element is a seq-like kind: forward name + extra "*" level. */
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* name0;
Ropeobj178006* LOC55;
Tidobj199004* LOC56;
TNimObject* LOC57;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
LOC55 = (Ropeobj178006*)0;
LOC55 = HEX26_178447_2381377266(name0, ((NimStringDesc*) &T839829468_53));
result0 = HEX26_178447_2381377266(LOC55, star0);
LOC56 = (Tidobj199004*)0;
LOC56 = &t_534942_839829468->Sup;
LOC57 = (TNimObject*)0;
LOC57 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC56, LOC57);
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
/* Any other element: full C type + star suffix, then cache. */
Ropeobj178006* LOC59;
Tidobj199004* LOC60;
TNimObject* LOC61;
LOC59 = (Ropeobj178006*)0;
LOC59 = gettypedescaux_533505_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC59, star0);
LOC60 = (Tidobj199004*)0;
LOC60 = &t_534942_839829468->Sup;
LOC61 = (TNimObject*)0;
LOC61 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC60, LOC61);
}
break;
}
}
break;
/* Open-array-like kinds: weak element desc + "*". */
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Ropeobj178006* LOC63;
Tidobj199004* LOC64;
TNimObject* LOC65;
LOC63 = (Ropeobj178006*)0;
LOC63 = gettypedescweak_534079_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
result0 = HEX26_178447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53));
LOC64 = (Tidobj199004*)0;
LOC64 = &t_534942_839829468->Sup;
LOC65 = (TNimObject*)0;
LOC65 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC64, LOC65);
}
break;
/* Enum-like kinds: emit a sized typedef (NU8/NU16/NU32/NU64 family,
 * chosen from getsize) and register the enum with the debug info. */
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 14):
{
Ttype292840* t0;
{
if (!((*t_534942_839829468).kind == ((Ttypekind292244) 20))) goto LA69;
t0 = lastson_295377_850551059(t_534942_839829468);
}
goto LA67;
LA69: ;
{
t0 = t_534942_839829468;
}
LA67: ;
result0 = cachegettype_533593_839829468((*m0).typecache, t0);
{
if (!(result0 == NIM_NIL)) goto LA74;
result0 = gettypename_533313_839829468(t0);
{
NIM_BOOL LOC78;
NIM_BOOL LOC80;
Tidobj199004* LOC84;
TNimObject* LOC85;
NI size0;
NU32 owner0;
LOC78 = (NIM_BOOL)0;
LOC78 = isimportedcpptype_533478_839829468(t0);
if (LOC78) goto LA79;
LOC80 = (NIM_BOOL)0;
LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
if (!(LOC80)) goto LA81;
LOC80 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA81: ;
LOC78 = LOC80;
LA79: ;
if (!!(LOC78)) goto LA82;
LOC84 = (Tidobj199004*)0;
LOC84 = &t0->Sup;
LOC85 = (TNimObject*)0;
LOC85 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC84, LOC85);
size0 = (NI)0;
{
/* Enums with negative first ordinal are emitted as signed 32-bit. */
NI64 LOC88;
TY178507 LOC91;
LOC88 = (NI64)0;
LOC88 = firstord_320001_3876443242(t0);
if (!(LOC88 < IL64(0))) goto LA89;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1);
size0 = ((NI) 4);
}
goto LA86;
LA89: ;
{
NI64 LOC93;
LOC93 = (NI64)0;
LOC93 = getsize_320135_3876443242(t0);
size0 = ((NI) (LOC93));
switch (size0) {
case ((NI) 1):
{
TY178507 LOC95;
memset((void*)LOC95, 0, sizeof(LOC95));
LOC95[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1);
}
break;
case ((NI) 2):
{
TY178507 LOC97;
memset((void*)LOC97, 0, sizeof(LOC97));
LOC97[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1);
}
break;
case ((NI) 4):
{
TY178507 LOC99;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC99[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1);
}
break;
case ((NI) 8):
{
TY178507 LOC101;
memset((void*)LOC101, 0, sizeof(LOC101));
LOC101[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1);
}
break;
default:
{
internalerror_196100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63));
}
break;
}
}
LA86: ;
/* Register (name, position) pairs of the enum's fields with the
 * global debug info, once per (name, line, owner) triple. */
owner0 = hashowner_532977_839829468((*t0).sym);
{
NIM_BOOL LOC105;
TY203017* vals0;
Enumdesc203007 LOC114;
LOC105 = (NIM_BOOL)0;
LOC105 = hasenum_203230_1926258066((&gdebuginfo_203470_1926258066), (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0);
if (!!(LOC105)) goto LA106;
vals0 = (TY203017*) newSeq((&NTI203017), 0);
{
NI i_535144_839829468;
NI HEX3Atmp_535649_839829468;
NI LOC109;
NI res_535652_839829468;
i_535144_839829468 = (NI)0;
HEX3Atmp_535649_839829468 = (NI)0;
LOC109 = (NI)0;
LOC109 = len_293081_850551059((*t0).n);
HEX3Atmp_535649_839829468 = (NI)(LOC109 - ((NI) 1));
res_535652_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
TY203018 LOC112;
NimStringDesc* LOC113;
if (!(res_535652_839829468 <= HEX3Atmp_535649_839829468)) goto LA111;
i_535144_839829468 = res_535652_839829468;
field0 = (*(*(*t0).n).kindU.S6.sons->data[i_535144_839829468]).kindU.S4.sym;
memset((void*)(&LOC112), 0, sizeof(LOC112));
LOC112.Field0 = copyString((*(*field0).name).s);
LOC112.Field1 = (*field0).position;
vals0 = (TY203017*) incrSeqV2(&(vals0)->Sup, sizeof(TY203018));
LOC113 = (NimStringDesc*)0;
LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0);
if (LOC113) nimGCunrefNoCycle(LOC113);
vals0->data[vals0->Sup.len].Field1 = LOC112.Field1;
++vals0->Sup.len;
res_535652_839829468 += ((NI) 1);
} LA111: ;
}
}
/* NOTE(review): duplicate memset below is a codegen artifact; harmless. */
memset((void*)(&LOC114), 0, sizeof(LOC114));
memset((void*)(&LOC114), 0, sizeof(LOC114));
LOC114.size = size0;
LOC114.owner = owner0;
LOC114.id = (*(*t0).sym).Sup.id;
LOC114.name = copyString((*(*(*t0).sym).name).s);
genericSeqAssign((&LOC114.values), vals0, (&NTI203017));
registerenum_203419_1926258066((&gdebuginfo_203470_1926258066), (&LOC114));
}
LA106: ;
}
LA82: ;
}
LA74: ;
}
break;
/* Proc type: typedef with calling convention, return type and params. */
case ((Ttypekind292244) 25):
{
Tidobj199004* LOC116;
TNimObject* LOC117;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC116 = (Tidobj199004*)0;
LOC116 = &t_534942_839829468->Sup;
LOC117 = (TNimObject*)0;
LOC117 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC116, LOC117);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t_534942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE);
{
NIM_BOOL LOC120;
LOC120 = (NIM_BOOL)0;
LOC120 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC120)) goto LA121;
{
/* callconv 8 is the closure convention: it gets a special
 * (fn pointer, env pointer) struct typedef instead. */
TY535235 LOC127;
if (!!(((*t_534942_839829468).callconv == ((Tcallingconvention292002) 8)))) goto LA125;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t_534942_839829468).callconv)- 0]);
LOC127[1] = rettype0;
LOC127[2] = result0;
LOC127[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4);
}
goto LA123;
LA125: ;
{
TY535238 LOC129;
memset((void*)LOC129, 0, sizeof(LOC129));
LOC129[0] = result0;
LOC129[1] = rettype0;
LOC129[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3);
}
LA123: ;
}
LA121: ;
}
break;
/* Sequence-like kind: forward-declared struct, cached as "name*". */
case ((Ttypekind292244) 24):
{
Tidobj199004* LOC144;
Ropeobj178006* LOC145;
TNimObject* LOC146;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC142;
TNimObject* LOC143;
if (!(result0 == NIM_NIL)) goto LA133;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC137;
NimStringDesc* LOC140;
TY532811 LOC141;
LOC137 = (NIM_BOOL)0;
LOC137 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC137)) goto LA138;
LOC140 = (NimStringDesc*)0;
LOC140 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC141, 0, sizeof(LOC141));
LOC141[0] = structorunion_534001_839829468(t_534942_839829468);
LOC141[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC140, LOC141, 2);
}
LA138: ;
LOC142 = (Tidobj199004*)0;
LOC142 = &t_534942_839829468->Sup;
LOC143 = (TNimObject*)0;
LOC143 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC142, LOC143);
}
LA133: ;
LOC144 = (Tidobj199004*)0;
LOC144 = &t_534942_839829468->Sup;
LOC145 = (Ropeobj178006*)0;
LOC145 = HEX26_178447_2381377266(result0, ((NimStringDesc*) &T839829468_53));
LOC146 = (TNimObject*)0;
LOC146 = &LOC145->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC144, LOC146);
{
NIM_BOOL LOC149;
LOC149 = (NIM_BOOL)0;
LOC149 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC149)) goto LA150;
{
/* Non-void element: emit the full seq payload struct (format
 * differs between C and C++ backends). */
Ttype292840* LOC154;
NimStringDesc* LOC157;
NimStringDesc* LOC158;
TY532811 LOC166;
LOC154 = (Ttype292840*)0;
LOC154 = skiptypes_296099_850551059((*t_534942_839829468).sons->data[((NI) 0)], IL64(211106232576256));
if (!!(((*LOC154).kind == ((Ttypekind292244) 3)))) goto LA155;
LOC157 = (NimStringDesc*)0;
LOC158 = (NimStringDesc*)0;
{
NIM_BOOL LOC161;
LOC161 = (NIM_BOOL)0;
LOC161 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC161) goto LA162;
LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA162: ;
if (!LOC161) goto LA163;
LOC158 = copyString(((NimStringDesc*) &T839829468_76));
}
goto LA159;
LA163: ;
{
LOC158 = copyString(((NimStringDesc*) &T839829468_77));
}
LA159: ;
LOC157 = rawNewString(LOC158->Sup.len + 31);
appendString(LOC157, LOC158);
appendString(LOC157, ((NimStringDesc*) &T839829468_78));
memset((void*)LOC166, 0, sizeof(LOC166));
LOC166[0] = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
LOC166[1] = result0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 4))- 0], LOC157, LOC166, 2);
}
goto LA152;
LA155: ;
{
/* seq of void-like element collapses to a generic seq base. */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_79));
}
LA152: ;
}
LA150: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_53));
}
break;
/* Array kinds: "typedef $elem $name[$len];" with length >= 1. */
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 n0;
Tidobj199004* LOC173;
TNimObject* LOC174;
n0 = lengthord_320007_3876443242(t_534942_839829468);
{
/* C arrays must have length >= 1. */
if (!(n0 <= IL64(0))) goto LA171;
n0 = IL64(1);
}
LA171: ;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC173 = (Tidobj199004*)0;
LOC173 = &t_534942_839829468->Sup;
LOC174 = (TNimObject*)0;
LOC174 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC173, LOC174);
{
NIM_BOOL LOC177;
Ropeobj178006* foo0;
TY535238 LOC180;
LOC177 = (NIM_BOOL)0;
LOC177 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC177)) goto LA178;
foo0 = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 1)], check0);
memset((void*)LOC180, 0, sizeof(LOC180));
LOC180[0] = foo0;
LOC180[1] = result0;
LOC180[2] = rope_178401_2381377266(n0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3);
}
LA178: ;
}
break;
/* Object (17) / tuple (18) kinds. */
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
/* Imported C++ generic: the pattern name contains '-quoted generic
 * slots which are substituted with the instantiated type args. */
NIM_BOOL LOC184;
Ropeobj178006* cppname0;
NI i0;
NI chunkstart0;
Ropeobj178006* LOC226;
LOC184 = (NIM_BOOL)0;
LOC184 = isimportedcpptype_533478_839829468(t_534942_839829468);
if (!(LOC184)) goto LA185;
LOC184 = ((*typ0).kind == ((Ttypekind292244) 11));
LA185: ;
if (!LOC184) goto LA186;
cppname0 = gettypename_533313_839829468(t_534942_839829468);
i0 = ((NI) 0);
chunkstart0 = ((NI) 0);
{
while (1) {
if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189;
{
NI chunkend0;
NI idx0;
NI stars0;
if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192;
chunkend0 = (i0 - 1);
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC196;
NimStringDesc* LOC199;
Ttype292840* typeinslot0;
LOC196 = (NIM_BOOL)0;
LOC196 = scancppgenericslot_534827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0));
if (!LOC196) goto LA197;
LOC199 = (NimStringDesc*)0;
LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0);
add_178487_2381377266(&result0, LOC199);
chunkstart0 = i0;
typeinslot0 = resolvestarsincpptype_534891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0);
{
NIM_BOOL LOC202;
TY533289 LOC206;
Ropeobj178006* LOC207;
LOC202 = (NIM_BOOL)0;
LOC202 = (typeinslot0 == NIM_NIL);
if (LOC202) goto LA203;
LOC202 = ((*typeinslot0).kind == ((Ttypekind292244) 62));
LA203: ;
if (!LOC202) goto LA204;
memset((void*)LOC206, 0, sizeof(LOC206));
LOC207 = (Ropeobj178006*)0;
LOC207 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0);
add_178482_2381377266(&result0, LOC207);
}
goto LA200;
LA204: ;
{
Ropeobj178006* LOC209;
LOC209 = (Ropeobj178006*)0;
LOC209 = gettypedescaux_533505_839829468(m0, typeinslot0, check0);
add_178482_2381377266(&result0, LOC209);
}
LA200: ;
}
LA197: ;
}
goto LA190;
LA192: ;
{
i0 += ((NI) 1);
}
LA190: ;
} LA189: ;
}
{
NimStringDesc* LOC215;
if (!!((chunkstart0 == ((NI) 0)))) goto LA213;
LOC215 = (NimStringDesc*)0;
LOC215 = copyStr((*cppname0).data, chunkstart0);
add_178487_2381377266(&result0, LOC215);
}
goto LA211;
LA213: ;
{
/* No slot markers: emit "name<T1, T2, ...>" from sons 1..len-2. */
result0 = HEX26_178447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82));
{
NI i_535516_839829468;
NI HEX3Atmp_535665_839829468;
NI LOC218;
NI res_535668_839829468;
i_535516_839829468 = (NI)0;
HEX3Atmp_535665_839829468 = (NI)0;
LOC218 = (NI)0;
LOC218 = len_295339_850551059(typ0);
HEX3Atmp_535665_839829468 = (NI)(LOC218 - ((NI) 2));
res_535668_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC225;
if (!(res_535668_839829468 <= HEX3Atmp_535665_839829468)) goto LA220;
i_535516_839829468 = res_535668_839829468;
{
if (!(((NI) 1) < i_535516_839829468)) goto LA223;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_83));
}
LA223: ;
LOC225 = (Ropeobj178006*)0;
LOC225 = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_535516_839829468], check0);
add_178482_2381377266(&result0, LOC225);
res_535668_839829468 += ((NI) 1);
} LA220: ;
}
}
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_84));
}
LA211: ;
/* NOTE(review): record desc is computed for its side effects only;
 * LOC226 is intentionally unused afterwards. */
LOC226 = (Ropeobj178006*)0;
LOC226 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA182;
LA186: ;
{
/* Ordinary object/tuple: forward-declare, cache, then emit the full
 * record (object) or tuple struct body. */
Tidobj199004* LOC241;
TNimObject* LOC242;
Ropeobj178006* recdesc0;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC239;
TNimObject* LOC240;
if (!(result0 == NIM_NIL)) goto LA230;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC234;
NimStringDesc* LOC237;
TY532811 LOC238;
LOC234 = (NIM_BOOL)0;
LOC234 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC234)) goto LA235;
LOC237 = (NimStringDesc*)0;
LOC237 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC238, 0, sizeof(LOC238));
LOC238[0] = structorunion_534001_839829468(t_534942_839829468);
LOC238[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC237, LOC238, 2);
}
LA235: ;
LOC239 = (Tidobj199004*)0;
LOC239 = &t_534942_839829468->Sup;
LOC240 = (TNimObject*)0;
LOC240 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC239, LOC240);
}
LA230: ;
LOC241 = (Tidobj199004*)0;
LOC241 = &t_534942_839829468->Sup;
LOC242 = (TNimObject*)0;
LOC242 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC241, LOC242);
{
if (!!(((*t_534942_839829468).kind == ((Ttypekind292244) 18)))) goto LA245;
recdesc0 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA243;
LA245: ;
{
recdesc0 = gettupledesc_534777_839829468(m0, t_534942_839829468, result0, check0);
}
LA243: ;
{
NIM_BOOL LOC250;
LOC250 = (NIM_BOOL)0;
LOC250 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC250)) goto LA251;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], recdesc0);
}
LA251: ;
}
LA182: ;
}
break;
/* Set kind: sized typedef (NU8..NU64 for small sets, byte array else). */
case ((Ttypekind292244) 19):
{
Ttype292840* LOC254;
Ropeobj178006* LOC255;
Tidobj199004* LOC256;
TNimObject* LOC257;
LOC254 = (Ttype292840*)0;
LOC254 = lastson_295377_850551059(t_534942_839829468);
LOC255 = (Ropeobj178006*)0;
LOC255 = gettypename_533313_839829468(LOC254);
result0 = HEX26_178447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105));
LOC256 = (Tidobj199004*)0;
LOC256 = &t_534942_839829468->Sup;
LOC257 = (TNimObject*)0;
LOC257 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC256, LOC257);
{
NIM_BOOL LOC260;
NI s0;
NI64 LOC263;
LOC260 = (NIM_BOOL)0;
LOC260 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC260)) goto LA261;
LOC263 = (NI64)0;
LOC263 = getsize_320135_3876443242(t_534942_839829468);
s0 = ((NI) (LOC263));
switch (s0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
TY532811 LOC265;
memset((void*)LOC265, 0, sizeof(LOC265));
LOC265[0] = result0;
LOC265[1] = rope_178401_2381377266(((NI64) ((NI)(s0 * ((NI) 8)))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2);
}
break;
default:
{
TY532811 LOC267;
NI64 LOC268;
memset((void*)LOC267, 0, sizeof(LOC267));
LOC267[0] = result0;
LOC268 = (NI64)0;
LOC268 = getsize_320135_3876443242(t_534942_839829468);
LOC267[1] = rope_178401_2381377266(LOC268);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2);
}
break;
}
}
LA261: ;
}
break;
/* Transparent/alias kinds: recurse into the last son. */
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC270;
LOC270 = (Ttype292840*)0;
LOC270 = lastson_295377_850551059(t_534942_839829468);
result0 = gettypedescaux_533505_839829468(m0, LOC270, check0);
}
break;
default:
{
/* Unsupported kind: internal error with the kind's enum name. */
NimStringDesc* LOC272;
LOC272 = (NimStringDesc*)0;
LOC272 = rawNewString(reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC272, ((NimStringDesc*) &T839829468_108));
appendString(LOC272, reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244)));
appendChar(LOC272, 41);
internalerror_196113_155036129(LOC272);
result0 = NIM_NIL;
}
break;
}
/* Done with this type: drop it from the recursion-check set. */
excl_268841_2627731572(check0, (*t_534942_839829468).Sup.id);
}BeforeRet: ;
return result0;
}
/* True when the type's kind is a compile-time-only kind: the 64-bit
 * constant is a bitmask over TTypeKind values (generated by the Nim
 * compiler — exact member kinds not visible here). */
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0) {
NU64 kindbit = (NU64)1 << ((NU)((*t0).kind) & 63U);
return (NIM_BOOL)((IL64(576460752303423744) & kindbit) != 0);
}
/* Choose the storage class for a proc parameter.  After skipping
 * transparent type layers (mask 8388864), parameters whose kind is NOT
 * in the bitmask IL64(281475110993936) (open-array-like kinds,
 * presumably) are marked as storage-loc 2; the rest get storage-loc 0. */
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0) {
Ttype292840* skipped = skiptypes_296099_850551059((*param0).typ, 8388864);
NIM_BOOL inset = ((IL64(281475110993936) & ((NU64)1 << ((NU)((*skipped).kind) & 63U))) != 0);
if (inset) {
return (Tstorageloc292812) 0;
}
return (Tstorageloc292812) 2;
}
/* Machine-generated (ccgtypes.ccgIntroducedPtr): decide whether the C
 * code generator passes a parameter by hidden pointer.  Explicit byref
 * (Ttypeflag 13) forces true, explicit byval (Ttypeflag 12) forces
 * false; otherwise objects/tuples are passed by pointer when they are
 * "large" (bigger than 2 * floatsize) or when option 18 (presumably a
 * by-ref optimization switch) is set. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0) {
NIM_BOOL result0;
Ttype292840* pt0;
{ result0 = (NIM_BOOL)0;
pt0 = skiptypes_296099_850551059((*s0).typ, IL64(211106232576256));
{
/* Explicit byref pragma wins. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 13))&31U)))!=0)) goto LA3;
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA1;
LA3: ;
{
/* Explicit byval pragma wins next. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 12))&31U)))!=0)) goto LA6;
result0 = NIM_FALSE;
goto BeforeRet;
}
goto LA1;
LA6: ;
LA1: ;
switch ((*pt0).kind) {
/* Object kind. */
case ((Ttypekind292244) 17):
{
{
NIM_BOOL LOC11;
NI64 LOC13;
LOC11 = (NIM_BOOL)0;
LOC11 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
if (LOC11) goto LA12;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242(pt0);
LOC11 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
result0 = NIM_TRUE;
}
goto LA9;
LA14: ;
{
/* Inheritable object with no base (flag 2, sons[0] == nil):
 * pass by value. */
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (!(LOC17)) goto LA18;
LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL);
LA18: ;
if (!LOC17) goto LA19;
result0 = NIM_FALSE;
}
goto LA9;
LA19: ;
{
result0 = NIM_TRUE;
}
LA9: ;
}
break;
/* Tuple kind: pointer only when large or option 18 set. */
case ((Ttypekind292244) 18):
{
NIM_BOOL LOC23;
NI64 LOC24;
LOC23 = (NIM_BOOL)0;
LOC24 = (NI64)0;
LOC24 = getsize_320135_3876443242(pt0);
LOC23 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC24);
if (LOC23) goto LA25;
LOC23 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
LA25: ;
result0 = LOC23;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Map a Nim return type to a C-type kind.  Currently identical to the
 * general parameter mapping; kept as a separate entry point so return
 * types can diverge later without touching callers. */
N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0) {
return maptype_533394_839829468(typ0);
}
/* Machine-generated (ccgtypes.genProcParams): build the C return-type
 * rope (*rettype0) and the parenthesized C parameter list (*params0)
 * for proc type t0.  Invalid/complex return types become a hidden
 * "Result" out-pointer parameter; closures (callconv 8) get an extra
 * environment parameter when declareenvironment0 is set; weakdep0
 * selects forward-declaration-friendly (weak) type descs for params.
 * Generated code: keep byte-identical; regenerate rather than edit. */
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) {
unsureAsgnRef((void**) (&(*params0)), NIM_NIL);
{
/* Return type: "void" (T839829468_26, presumably) when the proc has
 * no return or an invalid-for-C return type; otherwise the C desc. */
NIM_BOOL LOC3;
TY533289 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]);
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
unsureAsgnRef((void**) (&(*rettype0)), HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0));
}
goto LA1;
LA5: ;
{
unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_533505_839829468(m0, (*t0).sons->data[((NI) 0)], check0));
}
LA1: ;
{
/* Formal parameters: sons of t0.n, indices 1..len-1 (son 0 is the
 * return type slot). */
NI i_534152_839829468;
NI HEX3Atmp_534353_839829468;
NI LOC10;
NI res_534356_839829468;
i_534152_839829468 = (NI)0;
HEX3Atmp_534353_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = sonslen_295351_850551059((*t0).n);
HEX3Atmp_534353_839829468 = (NI)(LOC10 - ((NI) 1));
res_534356_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534356_839829468 <= HEX3Atmp_534353_839829468)) goto LA12;
i_534152_839829468 = res_534356_839829468;
{
Tsym292834* param0;
Ropeobj178006* LOC29;
Tstorageloc292812 LOC30;
TY533289 LOC45;
Ropeobj178006* LOC46;
Ttype292840* arr0;
NI j0;
{
/* Each param slot must be a symbol node (Tnodekind 3 == nkSym). */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kind == ((Tnodekind292020) 3)))) goto LA16;
internalerror_196100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109));
}
LA16: ;
param0 = (*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kindU.S4.sym;
{
/* Compile-time-only params (typedesc/static, presumably) are not
 * emitted into the C signature at all. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC20) goto LA21;
goto LA13;
}
LA21: ;
{
/* Comma separator before every param except the first. */
TY533289 LOC27;
Ropeobj178006* LOC28;
if (!!(((*params0) == NIM_NIL))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0);
add_178482_2381377266(params0, LOC28);
}
LA25: ;
LOC29 = (Ropeobj178006*)0;
LOC29 = manglename_533205_839829468(param0);
LOC30 = (Tstorageloc292812)0;
LOC30 = paramstorageloc_534098_839829468(param0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC29, LOC30);
{
/* Large value params get a hidden pointer: "<type> * name", and the
 * param's loc is flagged as indirect. */
NIM_BOOL LOC33;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC33 = (NIM_BOOL)0;
LOC33 = ccgintroducedptr_533611_839829468(param0);
if (!LOC33) goto LA34;
LOC36 = (Ropeobj178006*)0;
LOC36 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0);
add_178482_2381377266(params0, LOC38);
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
goto LA31;
LA34: ;
{
Ropeobj178006* LOC42;
if (!weakdep0) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC42);
}
goto LA31;
LA40: ;
{
Ropeobj178006* LOC44;
LOC44 = (Ropeobj178006*)0;
LOC44 = gettypedescaux_533505_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC44);
}
LA31: ;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0);
add_178482_2381377266(params0, LOC46);
add_178482_2381377266(params0, (*param0).loc.r);
/* Open-array params carry companion length parameters:
 * "name_Len<j>" for every nested open-array layer. */
arr0 = (*param0).typ;
{
if (!((*arr0).kind == ((Ttypekind292244) 23))) goto LA49;
arr0 = (*arr0).sons->data[((NI) 0)];
}
LA49: ;
j0 = ((NI) 0);
{
while (1) {
TY532811 LOC57;
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*arr0).kind)&63U)))!=0)) goto LA52;
{
if (!((*(*param0).typ).kind == ((Ttypekind292244) 23))) goto LA55;
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA55: ;
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = (*param0).loc.r;
LOC57[1] = rope_178401_2381377266(((NI64) (j0)));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2);
j0 += ((NI) 1);
arr0 = (*arr0).sons->data[((NI) 0)];
} LA52: ;
}
} LA13: ;
res_534356_839829468 += ((NI) 1);
} LA12: ;
}
}
{
/* Invalid-for-C return type: append a hidden "Result" out parameter
 * (pointer for mapped aggregates, full desc for array-like returns). */
NIM_BOOL LOC60;
Ttype292840* arr0;
TY533289 LOC76;
LOC60 = (NIM_BOOL)0;
LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
if (!(LOC60)) goto LA61;
LOC60 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]);
LA61: ;
if (!LOC60) goto LA62;
arr0 = (*t0).sons->data[((NI) 0)];
{
if (!!(((*params0) == NIM_NIL))) goto LA66;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA66: ;
{
Tctypekind529007 LOC70;
Ropeobj178006* LOC73;
LOC70 = (Tctypekind529007)0;
LOC70 = mapreturntype_533447_839829468((*t0).sons->data[((NI) 0)]);
if (!!((LOC70 == ((Tctypekind529007) 17)))) goto LA71;
LOC73 = (Ropeobj178006*)0;
LOC73 = gettypedescweak_534079_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC73);
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_53));
}
goto LA68;
LA71: ;
{
Ropeobj178006* LOC75;
LOC75 = (Ropeobj178006*)0;
LOC75 = gettypedescaux_533505_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC75);
}
LA68: ;
memset((void*)LOC76, 0, sizeof(LOC76));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0);
}
LA62: ;
{
/* Closure convention: append the hidden environment parameter. */
NIM_BOOL LOC79;
LOC79 = (NIM_BOOL)0;
LOC79 = ((*t0).callconv == ((Tcallingconvention292002) 8));
if (!(LOC79)) goto LA80;
LOC79 = declareenvironment0;
LA80: ;
if (!LOC79) goto LA81;
{
if (!!(((*params0) == NIM_NIL))) goto LA85;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA85: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_114));
}
LA81: ;
{
/* Varargs proc (Ttypeflag 0): append C "..." (presumably). */
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA89;
{
if (!!(((*params0) == NIM_NIL))) goto LA93;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA93: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_115));
}
LA89: ;
{
/* No params at all -> "(void)"-style close; otherwise plain ")". */
if (!((*params0) == NIM_NIL)) goto LA97;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_116));
}
goto LA95;
LA97: ;
{
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_117));
}
LA95: ;
unsureAsgnRef((void**) (&(*params0)), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_118), (*params0)));
}
/* Machine-generated (ccgtypes.genProcHeader): build the full C function
 * header rope for proc symbol prc0 — optional #line directive, linkage
 * prefix (extern/static depending on loc flag 5 and codegen flag 3, or
 * an inline marker for callconv 5), then calling convention, return
 * type, mangled name and parameter list.  A user-supplied .codegenDecl
 * constraint string overrides the standard "$1($2, $3)$4" layout. */
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Ropeobj178006* result0;
Ropeobj178006* rettype0;
Ropeobj178006* params0;
Intset268030 check0;
Ropeobj178006* LOC13;
result0 = (Ropeobj178006*)0;
rettype0 = (Ropeobj178006*)0;
params0 = (Ropeobj178006*)0;
genclinedir_532813_839829468(&result0, (*prc0).info);
{
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0)) goto LA3;
{
if (!(((*m0).flags &(1U<<((NU)(((Codegenflag529025) 3))&7U)))!=0)) goto LA7;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
goto LA5;
LA7: ;
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_23));
}
LA5: ;
}
goto LA1;
LA3: ;
{
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_24));
}
goto LA1;
LA11: ;
LA1: ;
/* NOTE(review): the memset/chckNil/memset sequence is a codegen
 * artifact (result-init + nil check + Nim-level init); harmless. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
LOC13 = (Ropeobj178006*)0;
LOC13 = manglename_533205_839829468(prc0);
fillloc_532282_839829468((&(*prc0).loc), ((Tlockind292808) 7), (*prc0).typ, LOC13, ((Tstorageloc292812) 0));
genprocparams_534115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE);
{
/* No .codegenDecl constraint: standard header format T839829468_119.
 * (The `!p == 0` form is codegen-speak for `p != NULL` -> other branch.) */
TY535235 LOC18;
if (!(*prc0).constraint == 0) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*(*prc0).typ).callconv)- 0]);
LOC18[1] = rettype0;
LOC18[2] = (*prc0).loc.r;
LOC18[3] = params0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4);
}
goto LA14;
LA16: ;
{
/* Constraint present: treat its string literal as the format. */
TY535238 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rettype0;
LOC20[1] = (*prc0).loc.r;
LOC20[2] = params0;
result0 = HEX25_178905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3);
}
LA14: ;
return result0;
}
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0) {
/* Nim `[]` operator: return the i0-th son of node `n0`.
   No bounds check - callers are expected to stay within `sons`. */
return (*n0).kindU.S6.sons->data[i0];
}
/* Scans statement node `n0` for a trivial `result = <expr>` assignment
   pattern, returning the assigned expression node (or nil).  Marks the
   nodes it recognizes with node flag bit 14.  NOTE(review): purpose
   inferred from the demangled name `easyResultAsgn` and the structure
   below; confirm against the Nim compiler's cgen module. */
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0) {
Tnode292802* result0;
{ result0 = (Tnode292802*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 115):
case ((Tnodekind292020) 126):
{
/* statement-list-like node: skip leading sons of "harmless" kinds
   (the big disjunction below), then recurse into the first other son. */
NI i0;
i0 = ((NI) 0);
{
while (1) {
NIM_BOOL LOC4;
NI LOC5;
Tnode292802* LOC7;
LOC4 = (NIM_BOOL)0;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
LOC4 = (i0 < LOC5);
if (!(LOC4)) goto LA6;
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(n0, i0);
LOC4 = ((*LOC7).kind == ((Tnodekind292020) 1) || (*LOC7).kind >= ((Tnodekind292020) 79) && (*LOC7).kind <= ((Tnodekind292020) 81) || (*LOC7).kind == ((Tnodekind292020) 84) || (*LOC7).kind == ((Tnodekind292020) 98) || (*LOC7).kind == ((Tnodekind292020) 101) || (*LOC7).kind == ((Tnodekind292020) 125));
LA6: ;
if (!LOC4) goto LA3;
i0 += ((NI) 1);
} LA3: ;
}
{
NI LOC10;
Tnode292802* LOC13;
LOC10 = (NI)0;
LOC10 = len_293081_850551059(n0);
if (!(i0 < LOC10)) goto LA11;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(n0, i0);
result0 = easyresultasgn_560191_839829468(LOC13);
}
LA11: ;
}
break;
case ((Tnodekind292020) 73):
case ((Tnodekind292020) 74):
{
/* assignment-like node: if the LHS (son 0) is a symbol node (kind 3)
   whose symbol kind is 11 (presumably skResult - TODO confirm),
   flag the node and return the RHS (son 1). */
{
NIM_BOOL LOC17;
Tnode292802* LOC18;
Tnode292802* LOC20;
LOC17 = (NIM_BOOL)0;
LOC18 = (Tnode292802*)0;
LOC18 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = ((*LOC18).kind == ((Tnodekind292020) 3));
if (!(LOC17)) goto LA19;
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = (((Tsymkind292435) 11) == (*(*LOC20).kindU.S4.sym).kind);
LA19: ;
if (!LOC17) goto LA21;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
result0 = HEX5BHEX5D_293238_850551059(n0, ((NI) 1));
goto BeforeRet;
}
LA21: ;
}
break;
case ((Tnodekind292020) 109):
{
/* wrapper node: recurse into son 0; flag this node too on success. */
{
NI LOC26;
Tnode292802* LOC29;
LOC26 = (NI)0;
LOC26 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC26)) goto LA27;
LOC29 = (Tnode292802*)0;
LOC29 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
result0 = easyresultasgn_560191_839829468(LOC29);
{
if (!!((result0 == NIM_NIL))) goto LA32;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
}
LA32: ;
}
LA27: ;
}
break;
default:
{
/* any other node kind: no easy assignment - return nil. */
}
break;
}
}BeforeRet: ;
return result0;
}
/* Returns the C type descriptor rope for `typ0` in module `m0`.  A
   fresh marker set `check0` is passed down to gettypedescaux,
   presumably to break recursion through self-referential types. */
N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
Intset268030 check0;
result0 = (Ropeobj178006*)0;
/* Nim codegen's zero-init + nil-check + re-zero idiom for locals. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
result0 = gettypedescaux_533505_839829468(m0, typ0, (&check0));
return result0;
}
/* Builds the C declaration rope for local variable symbol `s0` inside
   proc context `p0`: fills the symbol's loc on first use, then emits
   "<type> [qualifiers] <name>" - or, when the symbol carries a user
   `constraint` pattern (codegenDecl pragma?), interpolates type and
   name into that pattern instead. */
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
Ropeobj178006* LOC5;
/* loc kind 0 == not yet assigned: mangle the name and fill the loc. */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 2), (*s0).typ, LOC5, ((Tstorageloc292812) 2));
{
/* symbol kind 9: set loc flag bit 2 - TODO(review) confirm which
   symbol kind / loc flag these ordinals denote. */
if (!((*s0).kind == ((Tsymkind292435) 9))) goto LA8;
(*s0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA8: ;
}
LA3: ;
result0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t);
{
/* `constraint == nil`: standard declaration layout. */
if (!(*s0).constraint == 0) goto LA12;
{
/* symbol flag bit 8 set: append qualifier T839829468_121. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA16;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_121));
}
LA16: ;
{
/* symbol flag bit 7 set: append qualifier T839829468_122. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA20;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_122));
}
LA20: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_111));
add_178482_2381377266(&result0, (*s0).loc.r);
}
goto LA10;
LA12: ;
{
/* user-supplied declaration pattern: interpolate type and name. */
TY532811 LOC23;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = result0;
LOC23[1] = (*s0).loc.r;
result0 = HEX25_178905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2);
}
LA10: ;
return result0;
}
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0) {
/* Reset location `result0`: kind `k0`, storage `s0`, type `typ0`,
   no rope yet, no flags.  Ref-typed fields are stored through the
   GC write barrier (unsureAsgnRef). */
unsureAsgnRef((void**) (&(*result0).t), typ0);
unsureAsgnRef((void**) (&(*result0).r), NIM_NIL);
(*result0).k = k0;
(*result0).s = s0;
(*result0).flags = 0;
}
/* Initializes `result0` as a fresh loc (kind 0 = unknown) for
   expression `e0`, sets loc flag bit 8 (presumably "single use" -
   TODO confirm), then lowers the expression into the loc. */
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
(*result0).flags |= ((NU16)1)<<((((Tlocflag292810) 8))%(sizeof(NU16)*8));
expr_539248_839829468(p0, e0, result0);
}
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
/* Address of section `s0` inside the innermost (last) open block of
   proc context `p0`.  A nil `blocks` seq counts as length 0. */
NI last0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
return &(*p0).blocks->data[last0].sections[(s0)- 0];
}
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0) {
/* Indent rope `r0` to the current nesting depth of `p0`: prepend one
   `indent` rope per open block.  With no open blocks the loop bound
   is -1 and `r0` is returned unchanged. */
Ropeobj178006* result0 = r0;
NI depth0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
NI i0;
for (i0 = ((NI) 0); i0 <= depth0; i0 += ((NI) 1)) {
prepend_178893_2381377266(&result0, indent_532655_839829468);
}
return result0;
}
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
/* Format `frmt0` with `args0` using the cgen-aware ropecg formatter,
   indent the result to the current block depth and append it to
   section `s0` of the innermost block of `p0`. */
Ropeobj178006** dest0 = s_529179_3723162438(p0, s0);
Ropeobj178006* formatted0 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
add_178482_2381377266(dest0, indentline_532656_839829468(p0, formatted0));
}
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0) {
/* Read-access rope for location `a0`.  When the loc is flagged as
   indirect (flag bit 0), wrap the rope with the dereference format
   T839829468_124 - presumably "(*$1)". */
Ropeobj178006* result0 = (*a0).r;
if ((((*a0).flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0)) {
TY178507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), fmtargs0, 1);
}
return result0;
}
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0) {
/* Append rope `r0`, indented to the current block depth, to section
   `s0` of the innermost block of proc context `p0`. */
Ropeobj178006** dest0 = s_529179_3723162438(p0, s0);
Ropeobj178006* indented0 = indentline_532656_839829468(p0, r0);
add_178482_2381377266(dest0, indented0);
}
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
/* Like linefmt, but formats with the plain `%` rope formatter
   (HEX25 == mangled `%`) instead of the cgen-aware ropecg. */
Ropeobj178006** dest0 = s_529179_3723162438(p0, s0);
Ropeobj178006* formatted0 = HEX25_178905_2381377266(frmt0, args0, args0Len0);
add_178482_2381377266(dest0, indentline_532656_839829468(p0, formatted0));
}
/* Emits the runtime-type-information (TNimType) initialization for
   `typ0` into module `m0`'s file sections: kind, size expression,
   base-type pointer `base0`, GC-relevant flags, and a debug name.
   NOTE(review): inferred from demangled names (genTypeInfoAuxBase);
   ordinal section/flag meanings below are uncomfirmed guesses. */
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0) {
NI nimtypekind0;
Ropeobj178006* size0;
TY535235 LOC17;
NI flags0;
Ropeobj178006* LOC33;
TY532811 LOC34;
NimStringDesc* LOC35;
nimtypekind0 = (NI)0;
{
/* objects without a type field are reported as kind 18; otherwise
   the type's own kind ordinal is used. */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isobjlackingtypefield_533515_839829468(typ0);
if (!LOC3) goto LA4;
nimtypekind0 = ((NI) 18);
}
goto LA1;
LA4: ;
{
nimtypekind0 = ((NI) ((*typ0).kind));
}
LA1: ;
size0 = (Ropeobj178006*)0;
{
/* size expression: literal T839829468_133 when type flag bit 0 is
   set; else sizeof the original or resolved type's C descriptor,
   depending on compile command / module flag bit 27. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA9;
size0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_133));
}
goto LA7;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC12) goto LA13;
LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
size0 = gettypedesc_535673_839829468(m0, origtype0);
}
goto LA7;
LA14: ;
{
size0 = gettypedesc_535673_839829468(m0, typ0);
}
LA7: ;
/* Emit "name, size, kind, base" into file section 14 via T839829468_134. */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = name0;
LOC17[1] = size0;
LOC17[2] = rope_178401_2381377266(((NI64) (nimtypekind0)));
LOC17[3] = base0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4);
flags0 = ((NI) 0);
{
/* flag bit 1: type contains no GC'ed refs. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = containsgarbagecollectedref_320117_3876443242(typ0);
if (!!(LOC20)) goto LA21;
flags0 = (NI)(flags0 | ((NI) 1));
}
LA21: ;
{
/* flag bit 2: type cannot form a reference cycle. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = canformacycle_320123_3876443242(typ0);
if (!!(LOC25)) goto LA26;
flags0 = (NI)(flags0 | ((NI) 2));
}
LA26: ;
{
TY532811 LOC32;
/* emit the flags assignment only when some flag is set. */
if (!!((flags0 == ((NI) 0)))) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
LOC32[1] = rope_178401_2381377266(((NI64) (flags0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2);
}
LA30: ;
/* Pull in the runtime symbol T839829468_129 and emit the TNimType
   variable definition (section 9) with the type's debug name string. */
LOC33 = (Ropeobj178006*)0;
LOC33 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = name0;
LOC35 = (NimStringDesc*)0;
LOC35 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC34[1] = rope_178277_2381377266(LOC35);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2);
}
/* Returns a rope indexing the next free slot of the module's
   preallocated TNimNode array ("<typenodesname>[<typenodes>]" via
   format T839829468_138) and bumps the per-module counter.  The
   counter must be read before the increment - order matters. */
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
TY532811 LOC1;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = (*m0).typenodesname;
LOC1[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC1, 2);
(*m0).typenodes += ((NI) 1);
return result0;
}
/* Emits runtime type information for tuple type `typ0` under the RTTI
   variable `name0`: the base TNimType record plus a TNimNode per field
   (offset/type/index), linked together through a temporary node array.
   NOTE(review): inferred from demangled names (genTupleInfo). */
N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* LOC1;
Ropeobj178006* expr0;
NI length0;
TY532811 LOC15;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1); /* tuples have no base type */
expr0 = getnimnode_535945_839829468(m0);
length0 = sonslen_295327_850551059(typ0);
{
Ropeobj178006* tmp0;
TY532811 LOC6;
TY535238 LOC12;
/* non-empty tuple: declare a node-pointer array `tmp0` of size
   `length0` (section 12), fill one TNimNode per field (section 14). */
if (!(((NI) 0) < length0)) goto LA4;
tmp0 = gettempname_533598_839829468(m0);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = tmp0;
LOC6[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2);
{
NI i_536573_839829468;
NI HEX3Atmp_536592_839829468;
NI res_536595_839829468;
i_536573_839829468 = (NI)0;
HEX3Atmp_536592_839829468 = (NI)0;
HEX3Atmp_536592_839829468 = (NI)(length0 - ((NI) 1));
res_536595_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* a0;
Ropeobj178006* tmp20;
TY535238 LOC10;
TY535235 LOC11;
if (!(res_536595_839829468 <= HEX3Atmp_536592_839829468)) goto LA9;
i_536573_839829468 = res_536595_839829468;
a0 = (*typ0).sons->data[i_536573_839829468];
tmp20 = getnimnode_535945_839829468(m0);
/* link node i into the array, then fill its offset/type/info. */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0;
LOC10[1] = rope_178401_2381377266(((NI64) (i_536573_839829468)));
LOC10[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = tmp20;
LOC11[1] = gettypedesc_535673_839829468(m0, typ0);
LOC11[2] = rope_178401_2381377266(((NI64) (i_536573_839829468)));
LOC11[3] = gentypeinfo_535941_839829468(m0, a0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4);
res_536595_839829468 += ((NI) 1);
} LA9: ;
}
}
/* root node: "length0 sons stored in tmp0". */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = expr0;
LOC12[1] = rope_178401_2381377266(((NI64) (length0)));
LOC12[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3);
}
goto LA2;
LA4: ;
{
/* empty tuple: root node with zero sons. */
TY532811 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2);
}
LA2: ;
/* attach the node tree to the RTTI record `name0`. */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = name0;
LOC15[1] = expr0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2);
}
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0) {
/* Builds a stand-in closure type owned by `owner0`: a tuple (kind 18)
   whose sons are a pointer-like type (kind 26) and a ref-like type
   (kind 22) to an empty tuple - presumably `tuple[prc: pointer,
   env: ref tuple[]]`.  TODO(review): confirm the kind ordinals
   against Ttypekind. */
Ttype292840* result0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
Ttype292840* prcpart0 = newtype_295107_850551059(((Ttypekind292244) 26), owner0);
rawaddson_296394_850551059(result0, prcpart0);
Ttype292840* envpart0 = newtype_295107_850551059(((Ttypekind292244) 22), owner0);
Ttype292840* inner0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
rawaddson_296394_850551059(envpart0, inner0);
rawaddson_296394_850551059(result0, envpart0);
return result0;
}
/* Emits RTTI for `typ0`: resolves the base type (son 0, skipping
   wrapper kinds for objects of kind 17) to a TNimType pointer rope,
   then delegates to gentypeinfoauxbase.  A missing/nil first son
   yields the nil-base literal T839829468_18. */
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* base0;
base0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NI LOC4;
Ttype292840* x0;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = sonslen_295327_850551059(typ0);
LOC3 = (((NI) 0) < LOC4);
if (!(LOC3)) goto LA5;
LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL));
LA5: ;
if (!LOC3) goto LA6;
x0 = (*typ0).sons->data[((NI) 0)];
{
/* for object types (kind 17) skip through the wrapper-kind mask
   before generating the base's type info. */
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA10;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA10: ;
base0 = gentypeinfo_535941_839829468(m0, x0);
}
goto LA1;
LA6: ;
{
base0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
}
LA1: ;
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, base0);
}
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0) {
/* True when `t0` needs constructor-style initialization: its kind is
   in bitmask 983056 (0xF0010 - kind 4 and kinds 16..19; presumably
   array/object/tuple-like, TODO confirm), or it is a proc type
   (kind 25) with the closure calling convention (8). */
if ((983056 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0) {
return (NIM_BOOL)1;
}
return (NIM_BOOL)((*t0).kind == ((Ttypekind292244) 25) && (*t0).callconv == ((Tcallingconvention292002) 8));
}
/* Makes sure the generated module includes header T839829468_151
   (presumably <string.h>), guarded by module flag bit 4 so the work
   happens once.  The boolean result of includestr (whether the header
   was newly added) is deliberately discarded - LOC5 is write-only. */
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC5;
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 4))&7U)))!=0))) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 4))%(sizeof(NU8)*8));
LOC5 = (NIM_BOOL)0;
LOC5 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151));
}
LA3: ;
}
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0) {
/* Address-of rope for location `a0`.  The rope is wrapped with the
   address-of pattern (T839829468_128 / T839829468_117 - presumably
   "(&" and ")") only when the loc is not already indirect (flag
   bit 0 clear) AND its mapped C type is not kind 17 (arrays already
   decay to addresses in C - TODO confirm ordinal). */
Ropeobj178006* result0 = (*a0).r;
NIM_BOOL needsamp0 = !((((*a0).flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
if (needsamp0) {
/* maptype is only consulted when the flag test passed (short-circuit). */
Tctypekind529007 ctype0 = maptype_533394_839829468((*a0).t);
needsamp0 = !((ctype0 == ((Tctypekind529007) 17)));
}
if (needsamp0) {
Ropeobj178006* opened0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_128), result0);
result0 = HEX26_178447_2381377266(opened0, ((NimStringDesc*) &T839829468_117));
}
return result0;
}
/* Emits code into `section0` that initializes the hidden type field(s)
   of object `a0` of type `t0`, based on what
   analyseobjectwithtypefield reports: 0 = nothing to do, 1 = the
   object itself has a type field, 2 = some embedded part does.
   NOTE(review): inferred from demangled names (genObjectInit). */
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0) {
Ttypefieldresult320145 LOC1;
LOC1 = (Ttypefieldresult320145)0;
LOC1 = analyseobjectwithtypefield_320149_3876443242(t0);
switch (LOC1) {
case ((Ttypefieldresult320145) 0):
{
/* no type field anywhere: nothing to initialize. */
}
break;
case ((Ttypefieldresult320145) 1):
{
Ropeobj178006* r0;
Ttype292840* s0;
TY532811 LOC19;
r0 = rdloc_538188_839829468(a0);
{
TY178507 LOC8;
/* when the caller did NOT hand us an address, dereference the
   rope (format T839829468_124) so we reach the object itself. */
if (!!(takeaddr0)) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1);
}
LA6: ;
s0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
{
/* unless compiling in the "old style" mode (command 2 or module
   flag bit 27), walk up the inheritance chain appending the
   super-field accessor T839829468_153 per level. */
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
if (!!(LOC11)) goto LA13;
{
while (1) {
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*s0).kind == ((Ttypekind292244) 17));
if (!(LOC17)) goto LA18;
LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL));
LA18: ;
if (!LOC17) goto LA16;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
s0 = skiptypes_296099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360));
} LA16: ;
}
}
LA13: ;
/* emit "set type field of r0 to t0's RTTI" (format T839829468_154). */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = r0;
LOC19[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2);
}
break;
case ((Ttypefieldresult320145) 2):
{
/* embedded type fields: call a runtime helper (T839829468_155)
   with the object's address and its RTTI. */
Ropeobj178006* r0;
TY532811 LOC26;
{
if (!takeaddr0) goto LA23;
r0 = addrloc_538204_839829468(a0);
}
goto LA21;
LA23: ;
{
r0 = rdloc_538188_839829468(a0);
}
LA21: ;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = r0;
LOC26[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2);
}
break;
}
}
/* Emits default-initialization code (section 2) for location `loc0`:
   simple value types get a plain zero-assignment (T839829468_150);
   complex value types get a memset-style clear (T839829468_152, with
   <string.h> pulled in) unless the loc is a temp without GC'ed refs,
   plus object type-field initialization.  NOTE(review): inferred from
   demangled names (constructLoc). */
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0) {
Ttype292840* typ0;
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC3)) goto LA4;
/* simple value: "<loc> = (<type>)0;"-style assignment. */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(loc0);
LOC6[1] = gettypedesc_535673_839829468((*p0).module, typ0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2);
}
goto LA1;
LA4: ;
{
{
/* clear the memory when this is not a temp, or the type holds
   GC'ed refs (temps without refs can stay garbage). */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = !(istemp0);
if (LOC10) goto LA11;
LOC10 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
LA11: ;
if (!LOC10) goto LA12;
{
/* imported C++ types must not be memset over - skip those. */
NIM_BOOL LOC16;
TY532811 LOC19;
LOC16 = (NIM_BOOL)0;
LOC16 = isimportedcpptype_533478_839829468(typ0);
if (!!(LOC16)) goto LA17;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(loc0);
LOC19[1] = rdloc_538188_839829468(loc0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2);
}
LA17: ;
}
LA12: ;
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, loc0, NIM_TRUE);
}
LA1: ;
}
/* Allocates a fresh temporary of type `t0` in proc `p0`: names it from
   the bumped label counter (prefix T839829468_149, presumably "TMP"),
   declares it in the locals section (0) via format T839829468_54,
   fills `result0` as a temp loc (kind 1, storage 2), and default-
   constructs it.  Note the inverted flag: constructloc's `istemp`
   argument is `!needsinit0`. */
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0) {
Ropeobj178006* LOC1;
TY532811 LOC2;
(*p0).labels += ((NI) 1);
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels)));
unsureAsgnRef((void**) (&(*result0).r), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_149), LOC1));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC2[1] = (*result0).r;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2);
(*result0).k = ((Tlockind292808) 1);
unsureAsgnRef((void**) (&(*result0).t), t0);
(*result0).s = ((Tstorageloc292812) 2);
(*result0).flags = 0;
constructloc_538388_839829468(p0, (&(*result0)), !(needsinit0));
}
/* Rope for accessing the parent (super) part of an object through
   `accessor0`: normally wraps the accessor with format T839829468_161
   (presumably appending ".Sup"); in "old style" mode (command 2 or
   module flag bit 27) the accessor is returned unchanged because the
   inheritance is flattened - TODO(review) confirm. */
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
TY178507 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = accessor0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1);
}
goto LA1;
LA5: ;
{
result0 = accessor0;
}
LA1: ;
return result0;
}
/* C literal rope for integer `i0`, choosing a spelling that C parsers
   accept: plain decimal inside (low(int32), high(int32)]; a special
   constant (T839829468_166) for exactly low(int32) because
   "-2147483648" is parsed as -(2147483648) in C; an int64-literal
   format (T839829468_167) for other values above low(int64); and a
   dedicated low(int64) spelling (T839829468_168) for the minimum. */
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (IL64(-2147483648) < i0);
if (!(LOC3)) goto LA4;
LOC3 = (i0 <= IL64(2147483647));
LA4: ;
if (!LOC3) goto LA5;
result0 = rope_178401_2381377266(i0);
}
goto LA1;
LA5: ;
{
TY533289 LOC10;
if (!(i0 == IL64(-2147483648))) goto LA8;
memset((void*)LOC10, 0, sizeof(LOC10));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0);
}
goto LA1;
LA8: ;
{
TY178507 LOC14;
if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(i0);
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1);
}
goto LA1;
LA12: ;
{
/* i0 == low(int64): cannot be written as a literal at all. */
TY533289 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0);
}
LA1: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0) {
/* C literal rope for an int64 value: format T839829468_167 for
   anything above low(int64); low(int64) itself cannot be written as
   a plain negative literal in C and gets the dedicated spelling
   T839829468_168. */
Ropeobj178006* result0;
if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY178507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rope_178401_2381377266(i0);
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), fmtargs0, 1);
} else {
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), noargs0, 0);
}
return result0;
}
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0) {
/* C literal rope for a uint64 value: decimal digits followed by the
   3-character suffix T839829468_171 (presumably "ULL"). */
NimStringDesc* digits0 = HEX24_8401_1689653243(i0);
NimStringDesc* literal0 = rawNewString(digits0->Sup.len + 3);
appendString(literal0, digits0);
appendString(literal0, ((NimStringDesc*) &T839829468_171));
return rope_178277_2381377266(literal0);
}
/* Emits a static Nim-string literal definition for `s0` into module
   section 8 (format T839829468_177, after pulling in the runtime
   string type via cgsym T839829468_79) and returns the fresh temp
   name the literal is bound to.  `s0` may be nil - its length is then
   taken as 0. */
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
TY535238 LOC2;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_79));
result0 = gettempname_533598_839829468(m0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = result0;
LOC2[1] = makecstring_191638_155036129(s0);
LOC2[2] = rope_178401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3);
return result0;
}
/* Emits a C literal rope for literal node `n0` interpreted as type
   `ty0`: integer kinds (5..15) dispatch on the skipped type (char/int,
   bool, int64, uint64, or a cast for the rest); nil literals (23)
   handle closure-typed nil with a cached pair; string kinds (20..22)
   produce Nim-string or cstring data; float kinds (16..18) print at
   max precision.  Any other node kind is an internal error.
   NOTE(review): inferred from demangled names (genLiteral); the kind
   ordinals below are uncomfirmed. */
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(ty0 == NIM_NIL)) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165));
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 5) ... ((Tnodekind292020) 15):
{
/* integer-like literal: dispatch on the skipped target type. */
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
switch ((*LOC6).kind) {
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 5):
{
result0 = intliteral_539270_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 1):
{
/* bool: nonzero -> T839829468_169, zero -> T839829468_170. */
{
TY533289 LOC13;
if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0);
}
goto LA9;
LA11: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0);
}
LA9: ;
}
break;
case ((Ttypekind292244) 35):
{
result0 = int64literal_549430_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 44):
{
result0 = uint64literal_549442_839829468(((NU64) ((*n0).kindU.S1.intval)));
}
break;
default:
{
/* other ordinal types: "((<type>) <int literal>)" cast form. */
TY532811 LOC19;
Ttype292840* LOC20;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ttype292840*)0;
LOC20 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
LOC19[0] = gettypedesc_535673_839829468((*p0).module, LOC20);
LOC19[1] = intliteral_539270_839829468((*n0).kindU.S1.intval);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
/* nil literal. */
Ttype292840* t0;
t0 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
{
/* closure-typed nil (proc kind 25 + calling convention 8):
   emit a cached static pair, keyed through the datacache. */
NIM_BOOL LOC24;
NI id0;
Ropeobj178006* LOC28;
LOC24 = (NIM_BOOL)0;
LOC24 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC24)) goto LA25;
LOC24 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA25: ;
if (!LOC24) goto LA26;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC28);
{
TY532811 LOC33;
/* first sighting of this node: emit the definition. */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC33[1] = result0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2);
}
LA31: ;
}
goto LA22;
LA26: ;
{
/* plain nil: literal T839829468_174, presumably "NIM_NIL". */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
}
LA22: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* string literal. */
{
TY533289 LOC40;
/* nil string value: emit the nil-string pattern T839829468_175. */
if (!(*n0).kindU.S3.strval == 0) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0);
}
goto LA36;
LA38: ;
{
Ttype292840* LOC42;
NI id0;
LOC42 = (Ttype292840*)0;
LOC42 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
/* Nim string type (kind 28): cache the literal per node. */
if (!((*LOC42).kind == ((Ttypekind292244) 28))) goto LA43;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
{
TY178507 LOC49;
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = getstrlit_549468_839829468((*p0).module, (*n0).kindU.S3.strval);
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1);
}
goto LA45;
LA47: ;
{
/* already emitted earlier: reference the cached temp. */
TY532811 LOC51;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = (*(*p0).module).tmpbase;
LOC51[1] = rope_178401_2381377266(((NI64) (id0)));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2);
}
LA45: ;
}
goto LA36;
LA43: ;
{
/* cstring (or other): plain C string literal. */
result0 = makecstring_191638_155036129((*n0).kindU.S3.strval);
}
LA36: ;
}
break;
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 18):
{
/* float literal at maximum precision. */
NimStringDesc* LOC54;
LOC54 = (NimStringDesc*)0;
LOC54 = tostrmaxprecision_298007_3471544153((*n0).kindU.S2.floatval);
result0 = rope_178277_2381377266(LOC54);
}
break;
default:
{
/* not a literal node: internal compiler error with the kind name. */
NimStringDesc* LOC56;
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 12);
appendString(LOC56, ((NimStringDesc*) &T839829468_179));
appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendChar(LOC56, 41);
internalerror_196100_155036129((*n0).info, LOC56);
result0 = NIM_NIL;
}
break;
}
return result0;
}
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Convenience overload: emit literal `n0` using the node's own type. */
return genliteral_549476_839829468(p0, n0, (*n0).typ);
}
/* Emits the C `case` labels for one `of`-branch node `branch0` (all
   sons except the trailing statement body).  Range sons (kind 44) use
   the GCC "case lo ... hi:" extension when the active C compiler
   advertises it (Cc props bit 0); otherwise each value in the range
   gets its own plain "case v:" label.  NOTE(review): inferred from
   demangled names (genCaseRange). */
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0) {
NI length0;
length0 = len_293081_850551059(branch0);
{
NI j_547677_839829468;
NI HEX3Atmp_547718_839829468;
NI res_547721_839829468;
j_547677_839829468 = (NI)0;
HEX3Atmp_547718_839829468 = (NI)0;
HEX3Atmp_547718_839829468 = (NI)(length0 - ((NI) 2)); /* skip the body (last son) */
res_547721_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547721_839829468 <= HEX3Atmp_547718_839829468)) goto LA3;
j_547677_839829468 = res_547721_839829468;
{
Tnode292802* LOC6;
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
if (!((*LOC6).kind == ((Tnodekind292020) 44))) goto LA7;
{
/* range son + compiler supports "case lo ... hi:". */
TY532811 LOC13;
Tnode292802* LOC14;
Tnode292802* LOC15;
Tnode292802* LOC16;
Tnode292802* LOC17;
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0)) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
LOC14 = (Tnode292802*)0;
LOC14 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(LOC14, ((NI) 0));
LOC13[0] = genliteral_539273_839829468(p0, LOC15);
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC17 = (Tnode292802*)0;
LOC17 = HEX5BHEX5D_293238_850551059(LOC16, ((NI) 1));
LOC13[1] = genliteral_539273_839829468(p0, LOC17);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2);
}
goto LA9;
LA11: ;
{
/* range son, no extension: unroll into one label per value,
   counting a copy of the low node up to the high value. */
Tnode292802* v0;
Tnode292802* LOC19;
Tnode292802* LOC20;
LOC19 = (Tnode292802*)0;
LOC19 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(LOC19, ((NI) 0));
v0 = copynode_296528_850551059(LOC20);
{
while (1) {
Tnode292802* LOC23;
Tnode292802* LOC24;
TY178507 LOC25;
LOC23 = (Tnode292802*)0;
LOC23 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC24 = (Tnode292802*)0;
LOC24 = HEX5BHEX5D_293238_850551059(LOC23, ((NI) 1));
if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = genliteral_539273_839829468(p0, v0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1);
(*v0).kindU.S1.intval += ((NI) 1);
} LA22: ;
}
}
LA9: ;
}
goto LA4;
LA7: ;
{
/* single-value son: one plain "case v:" label. */
TY178507 LOC27;
Tnode292802* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC27[0] = genliteral_539273_839829468(p0, LOC28);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1);
}
LA4: ;
res_547721_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Recursively emits GC-traversal code for the object record AST `n0`,
   with `accessor0` as the C expression reaching the current object:
   record lists (kind 138) recurse into each son; record cases
   (kind 139) become a C switch over the discriminator with per-branch
   traversal; symbol nodes (kind 3) delegate to the type-level
   traversal for the field.  Anything else is an internal error.
   NOTE(review): inferred from demangled names (genTraverseProc). */
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0) {
{ {
if (!(n0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
/* record list: traverse every son. */
{
NI i_537068_839829468;
NI HEX3Atmp_537239_839829468;
NI LOC7;
NI res_537242_839829468;
i_537068_839829468 = (NI)0;
HEX3Atmp_537239_839829468 = (NI)0;
LOC7 = (NI)0;
LOC7 = sonslen_295351_850551059(n0);
HEX3Atmp_537239_839829468 = (NI)(LOC7 - ((NI) 1));
res_537242_839829468 = ((NI) 0);
{
while (1) {
if (!(res_537242_839829468 <= HEX3Atmp_537239_839829468)) goto LA9;
i_537068_839829468 = res_537242_839829468;
gentraverseproc_537039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_537068_839829468]);
res_537242_839829468 += ((NI) 1);
} LA9: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
/* record case: son 0 must be the discriminator symbol; emit
   "switch (accessor.disc) {" (T839829468_163), then one case
   group per branch (sons 1..), closing with T839829468_183. */
Tcproc529021* p0;
Tsym292834* disc0;
TY532811 LOC15;
TY533289 LOC28;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA13;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162));
}
LA13: ;
p0 = (*c0).p;
disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = accessor0;
LOC15[1] = (*disc0).loc.r;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2);
{
NI i_537098_839829468;
NI HEX3Atmp_537249_839829468;
NI LOC17;
NI res_537252_839829468;
i_537098_839829468 = (NI)0;
HEX3Atmp_537249_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(n0);
HEX3Atmp_537249_839829468 = (NI)(LOC17 - ((NI) 1));
res_537252_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC26;
TY533289 LOC27;
if (!(res_537252_839829468 <= HEX3Atmp_537249_839829468)) goto LA19;
i_537098_839829468 = res_537252_839829468;
branch0 = (*n0).kindU.S6.sons->data[i_537098_839829468];
{
/* of-branch (kind 85): case labels; otherwise: "default:". */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA22;
gencaserange_537028_839829468((*c0).p, branch0);
}
goto LA20;
LA22: ;
{
TY533289 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0);
}
LA20: ;
/* branch body (last son), then "break;" (T839829468_182). */
LOC26 = (Tnode292802*)0;
LOC26 = lastson_295364_850551059(branch0);
gentraverseproc_537039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0);
res_537252_839829468 += ((NI) 1);
} LA19: ;
}
}
memset((void*)LOC28, 0, sizeof(LOC28));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0);
}
break;
case ((Tnodekind292020) 3):
{
/* field symbol: traverse "accessor.field" by its type. */
Tsym292834* field0;
TY532811 LOC34;
Ropeobj178006* LOC35;
field0 = (*n0).kindU.S4.sym;
{
if (!((*field0).loc.t == NIM_NIL)) goto LA32;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
LA32: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = accessor0;
LOC34[1] = (*field0).loc.r;
LOC35 = (Ropeobj178006*)0;
LOC35 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2);
gentraverseproc_537022_839829468(c0, LOC35, (*field0).loc.t);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
break;
}
}BeforeRet: ;
}
/* Emit one line of generated code: expand the format string `frmt0` with
 * `args0` against the module of `p0`, indent the result to the proc's
 * current block depth, and append it to section `s0` of the proc body. */
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** section = s_529179_3723162438(p0, s0);
Ropeobj178006* expanded = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
Ropeobj178006* indented = indentline_532656_839829468(p0, expanded);
add_178482_2381377266(section, indented);
}
/* NOTE(review): machine-generated C (Nim compiler output).  Emits GC
 * traversal code for a *type*: dispatches on the (unique) type kind and
 * either recurses structurally (arrays, objects, tuples) or emits a call
 * to the visitor format string of `c0` (refs/sequences/closures).
 * Code kept byte-identical; only comments added. */
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468) {
Ttype292840* typ_537302_839829468;
Tcproc529021* p0;
{ {
if (!(typ_537027_839829468 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
typ_537302_839829468 = getuniquetype_528640_2036603609(typ_537027_839829468);
p0 = (*c0).p;
switch ((*typ_537302_839829468).kind) {
/* type kinds 11/10/8: wrapper types (presumably generic inst/distinct/
 * range-like -- TODO confirm); traverse the last son instead */
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = lastson_295377_850551059(typ_537302_839829468);
gentraverseproc_537022_839829468(c0, accessor0, LOC6);
}
break;
/* type kinds 4/16: fixed-size array-like types; emit a C for-loop over
 * lengthord(index type) and traverse accessor[i] (element type = son 1) */
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 arraysize0;
Tloc292816 i0;
Ttype292840* LOC8;
TY532811 LOC9;
TY532811 LOC10;
Ropeobj178006* LOC11;
TY533289 LOC12;
arraysize0 = lengthord_320007_3876443242((*typ_537302_839829468).sons->data[((NI) 0)]);
memset((void*)(&i0), 0, sizeof(i0));
/* fresh loop counter temp of system type kind 31 */
LOC8 = (Ttype292840*)0;
LOC8 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC8, (&i0), NIM_FALSE);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = i0.r;
LOC9[1] = rope_178401_2381377266(arraysize0);
/* loop header (format T839829468_159) */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = accessor0;
LOC10[1] = i0.r;
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2);
gentraverseproc_537022_839829468(c0, LOC11, (*typ_537302_839829468).sons->data[((NI) 1)]);
/* loop footer (format T839829468_160) */
memset((void*)LOC12, 0, sizeof(LOC12));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0);
}
break;
/* type kind 17: object; traverse all base types, then the record body */
case ((Ttypekind292244) 17):
{
{
NI i_537325_839829468;
NI HEX3Atmp_537384_839829468;
NI LOC15;
NI res_537387_839829468;
i_537325_839829468 = (NI)0;
HEX3Atmp_537384_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = sonslen_295327_850551059(typ_537302_839829468);
HEX3Atmp_537384_839829468 = (NI)(LOC15 - ((NI) 1));
res_537387_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* x0;
Ropeobj178006* LOC22;
if (!(res_537387_839829468 <= HEX3Atmp_537384_839829468)) goto LA17;
i_537325_839829468 = res_537387_839829468;
x0 = (*typ_537302_839829468).sons->data[i_537325_839829468];
{
if (!!((x0 == NIM_NIL))) goto LA20;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA20: ;
/* base objects are reached through the parent-object accessor */
LOC22 = (Ropeobj178006*)0;
LOC22 = parentobj_537257_839829468(accessor0, (*(*c0).p).module);
gentraverseproc_537022_839829468(c0, LOC22, x0);
res_537387_839829468 += ((NI) 1);
} LA17: ;
}
}
{
/* own fields live in (*typ).n (the record AST) */
if (!!(((*typ_537302_839829468).n == NIM_NIL))) goto LA25;
gentraverseproc_537039_839829468(c0, accessor0, (*typ_537302_839829468).n);
}
LA25: ;
}
break;
/* type kind 18: tuple; traverse each positional field as accessor.FieldN
 * (format T839829468_185) */
case ((Ttypekind292244) 18):
{
Ttype292840* typ0;
typ0 = getuniquetype_528640_2036603609(typ_537302_839829468);
{
NI i_537363_839829468;
NI HEX3Atmp_537392_839829468;
NI LOC29;
NI res_537395_839829468;
i_537363_839829468 = (NI)0;
HEX3Atmp_537392_839829468 = (NI)0;
LOC29 = (NI)0;
LOC29 = sonslen_295327_850551059(typ0);
HEX3Atmp_537392_839829468 = (NI)(LOC29 - ((NI) 1));
res_537395_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC32;
Ropeobj178006* LOC33;
if (!(res_537395_839829468 <= HEX3Atmp_537392_839829468)) goto LA31;
i_537363_839829468 = res_537395_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = accessor0;
LOC32[1] = rope_178401_2381377266(((NI64) (i_537363_839829468)));
LOC33 = (Ropeobj178006*)0;
LOC33 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2);
gentraverseproc_537022_839829468(c0, LOC33, (*typ0).sons->data[i_537363_839829468]);
res_537395_839829468 += ((NI) 1);
} LA31: ;
}
}
}
break;
/* type kinds 22/28/24: GC'ed pointer-like types (ref/seq/string-like --
 * TODO confirm); emit a direct visitor call on the accessor */
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY178507 LOC35;
memset((void*)LOC35, 0, sizeof(LOC35));
LOC35[0] = accessor0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC35, 1);
}
break;
/* type kind 25: proc type; only closures (calling convention 8) carry a
 * GC'ed environment -- visit it via format T839829468_186 */
case ((Ttypekind292244) 25):
{
{
TY178507 LOC41;
TY178507 LOC42;
if (!((*typ_537302_839829468).callconv == ((Tcallingconvention292002) 8))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = accessor0;
LOC41[0] = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC41, 1);
}
LA39: ;
}
break;
default:
{
/* non-GC'ed types need no traversal code */
}
break;
}
}BeforeRet: ;
}
/* NOTE(review): machine-generated C (Nim compiler output).  Emits traversal
 * code for a sequence type: a loop over the seq length (the length-field
 * string is chosen from T839829468_157 vs _158 depending on the compile
 * command / a module flag -- presumably the compatibleVersion/old-seq
 * layout switch, TODO confirm), then traverses each element.
 * Code kept byte-identical; only comments added. */
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0) {
Tcproc529021* p0;
Tloc292816 i0;
Ttype292840* LOC1;
TY535238 LOC2;
NimStringDesc* LOC3;
TY532811 LOC11;
Ropeobj178006* LOC12;
TY533289 LOC13;
p0 = (*c0).p;
memset((void*)(&i0), 0, sizeof(i0));
/* fresh loop counter temp of system type kind 31 */
LOC1 = (Ttype292840*)0;
LOC1 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC1, (&i0), NIM_FALSE);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = i0.r;
LOC2[1] = accessor0;
LOC3 = (NimStringDesc*)0;
{
/* pick the length accessor: T..._157 when gcmd is command 2 OR the
 * module symbol has flag bit 27 set; T..._158 otherwise */
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!LOC6) goto LA8;
LOC3 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA4;
LA8: ;
{
LOC3 = copyString(((NimStringDesc*) &T839829468_158));
}
LA4: ;
LOC2[2] = rope_178277_2381377266(LOC3);
/* loop header (format T839829468_156) */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = accessor0;
LOC11[1] = i0.r;
/* element accessor "accessor->data[i]" (format T839829468_187) */
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2);
gentraverseproc_537022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]);
/* loop footer (format T839829468_160) */
memset((void*)LOC13, 0, sizeof(LOC13));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0);
}
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0) {
Ropeobj178006* result0;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* header0;
TY178507 LOC3;
Ropeobj178006* t0;
TY178507 LOC4;
TY178507 LOC5;
Ropeobj178006* generatedproc0;
TY535235 LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
Ropeobj178006** LOC23;
TY178507 LOC24;
result0 = (Ropeobj178006*)0;
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
result0 = gettempname_533598_839829468(m0);
switch (reason0) {
case ((Ttypeinforeason537016) 0):
{
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145));
}
break;
default:
{
}
break;
}
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1);
t0 = gettypedesc_535673_839829468(m0, typ0);
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1);
c0.p = p0;
{
Ropeobj178006* LOC10;
if (!((*typ0).kind == ((Ttypekind292244) 24))) goto LA8;
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseprocseq_537399_839829468((&c0), LOC10, typ0);
}
goto LA6;
LA8: ;
{
{
Ttype292840* LOC14;
Ropeobj178006* LOC17;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256));
if (!((65552 &((NU64)1<<((NU)((*LOC14).kind)&63U)))!=0)) goto LA15;
LOC17 = (Ropeobj178006*)0;
LOC17 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseproc_537022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]);
}
goto LA12;
LA15: ;
{
Ropeobj178006* LOC19;
LOC19 = (Ropeobj178006*)0;
LOC19 = rope_178277_2381377266(((NimStringDesc*) &T839829468_189));
gentraverseproc_537022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]);
}
LA12: ;
}
LA6: ;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = header0;
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC20[1] = (*LOC21);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC20[2] = (*LOC22);
LOC23 = (Ropeobj178006**)0;
LOC23 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC20[3] = (*LOC23);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Emit RTTI for an array type: generate the element type's info (son 1)
 * first, then the array's own base type-info record referencing it. */
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* elemInfo = gentypeinfo_535941_839829468(m0, (*typ0).sons->data[((NI) 1)]);
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, elemInfo);
}
/* Emit RTTI for a set type: the generic aux record plus one extra node
 * recording the set's first ordinal value (format T839829468_193, written
 * into file section 14). */
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
TY535238 fmtArgs;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = getnimnode_535945_839829468(m0);
fmtArgs[1] = rope_178401_2381377266(firstord_320001_3876443242(typ0));
fmtArgs[2] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_193), fmtArgs, 3);
}
/* NOTE(review): machine-generated C (Nim compiler output).  Emits RTTI for
 * an enum type: a node-pointer array sized to the number of enum fields, a
 * comma-joined list of the field names, per-field position fix-ups when the
 * enum has "holes" (explicit non-sequential values), and the final init
 * code wiring everything into the type-info object.
 * Code kept byte-identical; only comments added. */
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* nodeptrs0;
NI length0;
TY532811 LOC1;
Ropeobj178006* enumnames0;
Ropeobj178006* specialcases0;
NI firstnimnode0;
NIM_BOOL hasholes0;
Ropeobj178006* enumarray0;
Ropeobj178006* counter0;
TY178507 LOC24;
TY535238 LOC25;
TY536847 LOC26;
TY535235 LOC27;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
nodeptrs0 = gettempname_533598_839829468(m0);
length0 = sonslen_295351_850551059((*typ0).n);
/* declare the node-pointer array (format T839829468_139, section 12) */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = nodeptrs0;
LOC1[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2);
enumnames0 = (Ropeobj178006*)0;
specialcases0 = (Ropeobj178006*)0;
firstnimnode0 = (*m0).typenodes;
hasholes0 = NIM_FALSE;
{
NI i_536624_839829468;
NI HEX3Atmp_536860_839829468;
NI res_536863_839829468;
i_536624_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)(length0 - ((NI) 1));
res_536863_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
Ropeobj178006* elemnode0;
if (!(res_536863_839829468 <= HEX3Atmp_536860_839829468)) goto LA4;
i_536624_839829468 = res_536863_839829468;
field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_536624_839829468]).kindU.S4.sym;
/* reserve one nim node for this enum field */
elemnode0 = getnimnode_535945_839829468(m0);
{
Ropeobj178006* LOC9;
/* no ast: use the field's identifier as the display name */
if (!((*field0).ast == NIM_NIL)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = makecstring_191638_155036129((*(*field0).name).s);
add_178482_2381377266(&enumnames0, LOC9);
}
goto LA5;
LA7: ;
{
/* has ast: the display name is the attached string value
 * (enum fields with a custom string representation) */
Ropeobj178006* LOC11;
LOC11 = (Ropeobj178006*)0;
LOC11 = makecstring_191638_155036129((*(*field0).ast).kindU.S3.strval);
add_178482_2381377266(&enumnames0, LOC11);
}
LA5: ;
{
/* join names with "," + newline, except after the last one */
NimStringDesc* LOC16;
if (!(i_536624_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14;
LOC16 = (NimStringDesc*)0;
LOC16 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC16, ((NimStringDesc*) &T839829468_110));
appendString(LOC16, tnl_176644_4151366050);
add_178487_2381377266(&enumnames0, LOC16);
}
LA14: ;
{
/* field position differs from its index, or the type already has
 * flag bit 5 set -> emit an explicit position assignment and mark
 * the enum as having holes */
NIM_BOOL LOC19;
TY532811 LOC23;
LOC19 = (NIM_BOOL)0;
LOC19 = !(((*field0).position == i_536624_839829468));
if (LOC19) goto LA20;
LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0);
LA20: ;
if (!LOC19) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = elemnode0;
LOC23[1] = rope_178401_2381377266(((NI64) ((*field0).position)));
addf_179205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2);
hasholes0 = NIM_TRUE;
}
LA21: ;
res_536863_839829468 += ((NI) 1);
} LA4: ;
}
}
enumarray0 = gettempname_533598_839829468(m0);
counter0 = gettempname_533598_839829468(m0);
/* loop counter declaration (format T..._195, section 12) */
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = counter0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1);
/* enum-name array definition (format T..._196, section 12) */
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = enumarray0;
LOC25[1] = rope_178401_2381377266(((NI64) (length0)));
LOC25[2] = enumnames0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3);
/* init loop filling nodeptrs from the module's typenodes pool starting at
 * firstnimnode (format T..._197, section 14) */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = counter0;
LOC26[1] = rope_178401_2381377266(((NI64) (length0)));
LOC26[2] = (*m0).typenodesname;
LOC26[3] = rope_178401_2381377266(((NI64) (firstnimnode0)));
LOC26[4] = enumarray0;
LOC26[5] = nodeptrs0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], specialcases0);
/* root node wiring the field nodes into the type info (format T..._198) */
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = getnimnode_535945_839829468(m0);
LOC27[1] = rope_178401_2381377266(((NI64) (length0)));
LOC27[2] = nodeptrs0;
LOC27[3] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4);
{
/* mark the type info as "enum with holes" (format T..._199) */
TY178507 LOC32;
if (!hasholes0) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1);
}
LA30: ;
}
/* Build the mangled C name of the discriminator table for field `d0`:
 * walk up the inheritance chain (sons[0]) until the record that actually
 * declares `d0` is found, then format "<type id>_<mangled field name>"
 * via format string T839829468_201.  Reports an internal error if the
 * owning type has no symbol.  (`m0` is unused, kept for the interface.) */
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0) {
Ttype292840* owner = objtype_536060_839829468;
TY532811 fmtArgs;
NimStringDesc* mangled;
while (lookupinrecord_299119_2984716966((*owner).n, (*d0).name) == NIM_NIL) {
owner = (*owner).sons->data[((NI) 0)];
}
if ((*owner).sym == NIM_NIL) {
internalerror_196100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200));
}
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rope_178401_2381377266(((NI64) ((*owner).Sup.id)));
mangled = mangle_528847_2036603609((*(*d0).name).s);
fmtArgs[1] = rope_178277_2381377266(mangled);
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_201), fmtArgs, 2);
}
/* NOTE(review): machine-generated C (Nim compiler output).  Emits the
 * RTTI node tree for an object's record AST `n0` into `expr0`:
 *  - kind 138 (record list): one child node per son, or a collapsed/empty
 *    node for the 1-son and 0-son cases;
 *  - kind 139 (record case): a discriminator table mapping every ordinal
 *    value of the discriminator to its branch node;
 *  - kind 3 (field symbol): a leaf node with offset/type/name.
 * Code kept byte-identical; only comments added. */
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
NI L0;
L0 = sonslen_295351_850551059(n0);
{
/* single son: collapse -- reuse expr0 directly for the son */
if (!(L0 == ((NI) 1))) goto LA4;
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0);
}
goto LA2;
LA4: ;
{
/* multiple sons: declare a child array (format T..._139), fill one
 * node per son (T..._140), then link array into expr0 (T..._142) */
Ropeobj178006* tmp0;
TY532811 LOC9;
TY535238 LOC14;
if (!(((NI) 0) < L0)) goto LA7;
tmp0 = gettempname_533598_839829468(m0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = tmp0;
LOC9[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2);
{
NI i_536127_839829468;
NI HEX3Atmp_536482_839829468;
NI res_536485_839829468;
i_536127_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)(L0 - ((NI) 1));
res_536485_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* tmp20;
TY535238 LOC13;
if (!(res_536485_839829468 <= HEX3Atmp_536482_839829468)) goto LA12;
i_536127_839829468 = res_536485_839829468;
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = tmp0;
LOC13[1] = rope_178401_2381377266(((NI64) (i_536127_839829468)));
LOC13[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3);
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_536127_839829468], tmp20);
res_536485_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (L0)));
LOC14[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3);
}
goto LA2;
LA7: ;
{
/* empty record list: emit an empty node (format T..._143) */
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = expr0;
LOC16[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2);
}
LA2: ;
}
break;
case ((Tnodekind292020) 139):
{
/* variant part: son 0 is the discriminator field symbol; build a
 * table `tmp0` of size lengthord(disc type)+1 mapping every ordinal
 * value of the discriminator to its branch node */
Tsym292834* field0;
Ropeobj178006* tmp0;
NI64 L0;
TY536401 LOC18;
TY532811 LOC19;
field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
tmp0 = discriminatortablename_536057_839829468(m0, typ0, field0);
L0 = lengthord_320007_3876443242((*field0).typ);
/* the case node itself (format T..._202) */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = expr0;
LOC18[1] = gettypedesc_535673_839829468(m0, typ0);
LOC18[2] = (*field0).loc.r;
LOC18[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC18[4] = makecstring_191638_155036129((*(*field0).name).s);
LOC18[5] = tmp0;
LOC18[6] = rope_178401_2381377266(L0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7);
/* declaration of the table, size L+1 (format T..._203, section 8) */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0;
LOC19[1] = rope_178401_2381377266((NI64)(L0 + IL64(1)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2);
{
NI i_536421_839829468;
NI HEX3Atmp_536501_839829468;
NI LOC21;
NI res_536504_839829468;
i_536421_839829468 = (NI)0;
HEX3Atmp_536501_839829468 = (NI)0;
LOC21 = (NI)0;
LOC21 = sonslen_295351_850551059(n0);
HEX3Atmp_536501_839829468 = (NI)(LOC21 - ((NI) 1));
/* branches start at son 1 */
res_536504_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* b0;
Ropeobj178006* tmp20;
Tnode292802* LOC24;
if (!(res_536504_839829468 <= HEX3Atmp_536501_839829468)) goto LA23;
i_536421_839829468 = res_536504_839829468;
b0 = (*n0).kindU.S6.sons->data[i_536421_839829468];
tmp20 = getnimnode_535945_839829468(m0);
LOC24 = (Tnode292802*)0;
LOC24 = lastson_295364_850551059(b0);
genobjectfields_536104_839829468(m0, typ0, LOC24, tmp20);
switch ((*b0).kind) {
/* kind 85: "of" branch; fill one table slot per listed ordinal
 * value or range */
case ((Tnodekind292020) 85):
{
{
NI LOC28;
LOC28 = (NI)0;
LOC28 = sonslen_295351_850551059(b0);
if (!(LOC28 < ((NI) 2))) goto LA29;
internalerror_196100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204));
}
LA29: ;
{
NI j_536436_839829468;
NI HEX3Atmp_536494_839829468;
NI LOC32;
NI res_536497_839829468;
j_536436_839829468 = (NI)0;
HEX3Atmp_536494_839829468 = (NI)0;
LOC32 = (NI)0;
LOC32 = sonslen_295351_850551059(b0);
/* iterate over the value sons only (last son is the body) */
HEX3Atmp_536494_839829468 = (NI)(LOC32 - ((NI) 2));
res_536497_839829468 = ((NI) 0);
{
while (1) {
if (!(res_536497_839829468 <= HEX3Atmp_536494_839829468)) goto LA34;
j_536436_839829468 = res_536497_839829468;
{
/* kind 44: a range value (a..b); fill every slot in it */
NI x0;
NI64 LOC39;
NI y0;
NI64 LOC40;
if (!((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kind == ((Tnodekind292020) 44))) goto LA37;
LOC39 = (NI64)0;
LOC39 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 0)]);
x0 = ((NI) (LOC39));
LOC40 = (NI64)0;
LOC40 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 1)]);
y0 = ((NI) (LOC40));
{
while (1) {
TY535238 LOC43;
if (!(x0 <= y0)) goto LA42;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = tmp0;
LOC43[1] = rope_178401_2381377266(((NI64) (x0)));
LOC43[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3);
x0 += ((NI) 1);
} LA42: ;
}
}
goto LA35;
LA37: ;
{
/* single ordinal value: fill its one slot */
TY535238 LOC45;
NI64 LOC46;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = tmp0;
LOC46 = (NI64)0;
LOC46 = getordvalue_320129_3876443242((*b0).kindU.S6.sons->data[j_536436_839829468]);
LOC45[1] = rope_178401_2381377266(LOC46);
LOC45[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3);
}
LA35: ;
res_536497_839829468 += ((NI) 1);
} LA34: ;
}
}
}
break;
/* kind 88: "else" branch; goes into the extra slot at index L */
case ((Tnodekind292020) 88):
{
TY535238 LOC48;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = tmp0;
LOC48[1] = rope_178401_2381377266(L0);
LOC48[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205));
}
break;
}
res_536504_839829468 += ((NI) 1);
} LA23: ;
}
}
}
break;
case ((Tnodekind292020) 3):
{
/* plain field: only emit a node when it is not a bitfield
 * (bitsize == 0); format T..._206 records offset/type/name */
Tsym292834* field0;
field0 = (*n0).kindU.S4.sym;
{
TY536475 LOC55;
if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = expr0;
LOC55[1] = gettypedesc_535673_839829468(m0, typ0);
LOC55[2] = (*field0).loc.r;
LOC55[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC55[4] = makecstring_191638_155036129((*(*field0).name).s);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5);
}
LA53: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207));
}
break;
}
}
/* Emit RTTI for an object type: kind 17 gets the plain aux record, any
 * other kind (presumably a ref/ptr to object) gets the base variant with
 * the empty-base marker T839829468_18.  Then, unless the type is an
 * imported C++ type, generate the field node tree, attach it to the type
 * info (format T..._144), and set flag bit 5 on every ancestor so their
 * infos know a subclass exists -- TODO confirm the flag's meaning. */
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* rootnode;
TY532811 fmtArgs;
Ttype292840* ancestor;
if ((*typ0).kind == ((Ttypekind292244) 17)) {
gentypeinfoaux_536027_839829468(m0, typ0, origtype0, name0);
} else {
Ropeobj178006* emptyBase = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, emptyBase);
}
rootnode = getnimnode_535945_839829468(m0);
if (!isimportedcpptype_533478_839829468(typ0)) {
genobjectfields_536104_839829468(m0, typ0, (*typ0).n, rootnode);
}
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = name0;
fmtArgs[1] = rootnode;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), fmtArgs, 2);
for (ancestor = (*typ0).sons->data[((NI) 0)]; !(ancestor == NIM_NIL); ancestor = (*ancestor).sons->data[((NI) 0)]) {
ancestor = skiptypes_296099_850551059(ancestor, IL64(211106247215360));
(*ancestor).flags |= ((NU32)1)<<((((Ttypeflag292431) 5))%(sizeof(NU32)*8));
}
}
/* Make sure the user-supplied deepcopy proc `s0` is code-generated, then
 * register it in the type-info init section (format T839829468_208). */
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0) {
TY532811 fmtArgs;
genproc_532951_839829468(m0, s0);
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = result0;
fmtArgs[1] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_208), fmtArgs, 2);
}
/* NOTE(review): machine-generated C (Nim compiler output).  The central
 * RTTI entry point: returns the rope "NTI<id>"-style reference (format
 * T839829468_127 wrapped by _128/_117) for type `t_535944_839829468`,
 * generating the type-info definition on first use.  Early-returns when
 * the info was already emitted or belongs to another module (then only an
 * extern declaration is added); otherwise dispatches on the type kind to
 * the gen*info helpers, and finally hooks up any deepcopy override.
 * Code kept byte-identical; only comments added. */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468) {
Ropeobj178006* result0;
Ttype292840* origtype0;
Ttype292840* t0;
TY178507 LOC1;
Tsym292834* owner0;
Ttype292840* LOC12;
Ropeobj178006* LOC66;
Ropeobj178006* LOC67;
Ropeobj178006* LOC68;
{ result0 = (Ropeobj178006*)0;
origtype0 = t_535944_839829468;
t0 = getuniquetype_528640_2036603609(t_535944_839829468);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rope_178401_2381377266(((NI64) ((*t0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1);
{
/* already generated for this module? just wrap the name and return */
NIM_BOOL LOC4;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id);
if (!LOC4) goto LA5;
LOC7 = (Ropeobj178006*)0;
LOC7 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178418_2381377266(LOC7, result0);
LOC9 = (Ropeobj178006*)0;
LOC9 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC8, LOC9);
goto BeforeRet;
}
LA5: ;
{
/* unwrap nested kind-13 wrappers (presumably typedesc/alias-like --
 * TODO confirm) */
while (1) {
if (!((*t0).kind == ((Ttypekind292244) 13))) goto LA11;
t0 = lastson_295377_850551059(t0);
} LA11: ;
}
LOC12 = (Ttype292840*)0;
LOC12 = skiptypes_296099_850551059(t0, IL64(211106247256320));
owner0 = getmodule_299123_2984716966((*LOC12).owner);
{
/* owned by a different module: generate the info there, emit only an
 * extern declaration here (format T..._131, section 9), and return */
Tcgen529027* LOC17;
Ropeobj178006* LOC18;
Ropeobj178006* LOC19;
Ropeobj178006* LOC20;
TY532811 LOC21;
NimStringDesc* LOC22;
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
Ropeobj178006* LOC25;
if (!!((owner0 == (*m0).module))) goto LA15;
LOC17 = (Tcgen529027*)0;
LOC17 = bmod_529201_3723162438(owner0);
LOC18 = (Ropeobj178006*)0;
LOC18 = gentypeinfo_535941_839829468(LOC17, t0);
/* pull in the two runtime symbols the declaration needs */
LOC19 = (Ropeobj178006*)0;
LOC19 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
LOC20 = (Ropeobj178006*)0;
LOC20 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = result0;
LOC22 = (NimStringDesc*)0;
LOC22 = typetostring_320017_3876443242(t0, ((Tprefereddesc320011) 0));
LOC21[1] = rope_178277_2381377266(LOC22);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2);
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178418_2381377266(LOC23, result0);
LOC25 = (Ropeobj178006*)0;
LOC25 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC24, LOC25);
goto BeforeRet;
}
LA15: ;
switch ((*t0).kind) {
/* kinds 3/62: no info object at all -- result is the literal
 * T839829468_132 (presumably "0"/NULL-like -- TODO confirm) */
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
}
break;
/* scalar-ish kinds: base info record with an empty node field */
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 23):
{
Ropeobj178006* LOC28;
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC28);
}
break;
/* kind 59: deduced/static-like type; must have an `n`, in which case
 * the info is that of its last son, else internal error */
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC34;
if (!!(((*t0).n == NIM_NIL))) goto LA32;
LOC34 = (Ttype292840*)0;
LOC34 = lastson_295377_850551059(t0);
result0 = gentypeinfo_535941_839829468(m0, LOC34);
}
goto LA30;
LA32: ;
{
NimStringDesc* LOC36;
LOC36 = (NimStringDesc*)0;
LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC36, ((NimStringDesc*) &T839829468_137));
appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC36, 41);
internalerror_196113_155036129(LOC36);
}
LA30: ;
}
break;
/* kind 25: proc type; closures (callconv 8) are described by a fake
 * closure tuple, everything else by the plain base record */
case ((Ttypekind292244) 25):
{
{
Ropeobj178006* LOC42;
if (!!(((*t0).callconv == ((Tcallingconvention292002) 8)))) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC42);
}
goto LA38;
LA40: ;
{
Ttype292840* LOC44;
LOC44 = (Ttype292840*)0;
LOC44 = fakeclosuretype_537010_839829468((*t0).owner);
gentupleinfo_536551_839829468(m0, LOC44, result0);
}
LA38: ;
}
break;
/* kinds 24/22 (seq/ref-like): aux record, plus a marker proc when the
 * selected GC (>= mode 4) needs one (format T..._192) */
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 22):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
{
Ropeobj178006* markerproc0;
TY532811 LOC50;
if (!(((Tgcmode169080) 4) <= gselectedgc_169133_2607990831)) goto LA48;
markerproc0 = gentraverseproc_537632_839829468(m0, t0, ((Ttypeinforeason537016) 0));
memset((void*)LOC50, 0, sizeof(LOC50));
LOC50[0] = result0;
LOC50[1] = markerproc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2);
}
LA48: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 20):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
genarrayinfo_537005_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 19):
{
gensetinfo_536867_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 14):
{
genenuminfo_536599_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 17):
{
genobjectinfo_536508_839829468(m0, t0, origtype0, result0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleinfo_536551_839829468(m0, t0, result0);
}
break;
default:
{
/* "cannot generate type info for (<kind>)" internal error */
NimStringDesc* LOC58;
LOC58 = (NimStringDesc*)0;
LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC58, ((NimStringDesc*) &T839829468_137));
appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC58, 41);
internalerror_196113_155036129(LOC58);
}
break;
}
{
/* attach a deepcopy override: prefer the unique type's, fall back to
 * the original (pre-unification) type's */
if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61;
gendeepcopyproc_538066_839829468(m0, (*t0).deepcopy, result0);
}
goto LA59;
LA61: ;
{
if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64;
gendeepcopyproc_538066_839829468(m0, (*origtype0).deepcopy, result0);
}
goto LA59;
LA64: ;
LA59: ;
/* wrap the raw name in the reference form ( T..._128 + name + T..._117 ) */
LOC66 = (Ropeobj178006*)0;
LOC66 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC67 = (Ropeobj178006*)0;
LOC67 = HEX26_178418_2381377266(LOC66, result0);
LOC68 = (Ropeobj178006*)0;
LOC68 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC67, LOC68);
}BeforeRet: ;
return result0;
}
/* NOTE(review): machine-generated C (Nim compiler output).  Emits ENDB
 * frame-debug registration for local symbol `s0`: skipped unless both
 * option bits in mask 163840 are set on the proc, and skipped for type
 * kinds in the mask IL64(281475110928384) (types with no runtime slot --
 * TODO confirm).  Otherwise registers name/address/type-info in the frame
 * (format T839829468_126) and bumps the frame-length counters.
 * Code kept byte-identical; only comments added. */
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* a0;
TY535235 LOC16;
NimStringDesc* LOC17;
{ {
/* need both option bits of mask 163840; otherwise no debug info */
if (!!(((163840 & (*p0).options) == 163840))) goto LA3;
goto BeforeRet;
}
LA3: ;
{
Ttype292840* LOC7;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*s0).typ, IL64(211106240964864));
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC7).kind)&63U)))!=0)) goto LA8;
goto BeforeRet;
}
LA8: ;
/* default: take the address of the local (prefix T839829468_52) */
a0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r);
{
/* params (kind 3) that the C backend already passes by pointer use
 * the location rope directly, without taking its address */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*s0).kind == ((Tsymkind292435) 3));
if (!(LOC12)) goto LA13;
LOC12 = ccgintroducedptr_533611_839829468(s0);
LA13: ;
if (!LOC12) goto LA14;
a0 = (*s0).loc.r;
}
LA14: ;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC17 = (NimStringDesc*)0;
LOC17 = nsuNormalize((*(*s0).name).s);
LOC16[1] = makecstring_191638_155036129(LOC17);
LOC16[2] = a0;
LOC16[3] = gentypeinfo_535941_839829468((*p0).module, (*s0).loc.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4);
/* one more slot in the frame, both globally and in the current block */
(*p0).maxframelen += ((NI) 1);
(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1);
}BeforeRet: ;
}
/*
 * assignlocalvar_538614_839829468 — machine-generated (Nim compiler C backend).
 * Emits the C declaration for local variable s0: builds
 * localvardecl(...) + template T839829468_125 + the tnl (newline) rope,
 * appends it to proc section 0 (the declarations section), then registers
 * the local for debug info via localdebuginfo_538449_839829468.
 */
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* decl0;
Ropeobj178006* LOC1;
Ropeobj178006* LOC2;
LOC1 = (Ropeobj178006*)0;
LOC1 = localvardecl_538532_839829468(p0, s0);
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX26_178447_2381377266(LOC1, ((NimStringDesc*) &T839829468_125));
decl0 = HEX26_178447_2381377266(LOC2, tnl_176644_4151366050);
line_532690_839829468(p0, ((Tcprocsection529011) 0), decl0);
localdebuginfo_538449_839829468(p0, s0);
}
/*
 * initlocalvar_538398_839829468 — machine-generated (Nim compiler C backend).
 * Default-constructs local v0's location (constructloc with NIM_FALSE)
 * only when BOTH conditions hold:
 *  - symbol flag bit 12 is NOT set on v0, and
 *  - immediateasgn0 is false (no immediate assignment follows).
 * Otherwise does nothing.
 */
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0) {
{
if (!!((((*v0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0))) goto LA3;
{
if (!!(immediateasgn0)) goto LA7;
constructloc_538388_839829468(p0, (&(*v0).loc), NIM_FALSE);
}
LA7: ;
}
LA3: ;
}
/*
 * fillresult_533865_839829468 — machine-generated (Nim compiler C backend).
 * Fills the location of a proc's `result` symbol: loc kind 4, storage 2,
 * with a rope formatted from template T839829468_210 (zero args).
 * If the mapped C return type is not Tctypekind 17 AND the Nim type is an
 * "invalid return type" (isinvalidreturntype), flips loc flag bit 0 on and
 * resets storage to 0 — i.e. the result is passed differently in that case.
 */
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0) {
TY533289 LOC1;
Ropeobj178006* LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC2, ((Tstorageloc292812) 2));
{
NIM_BOOL LOC5;
Tctypekind529007 LOC6;
LOC5 = (NIM_BOOL)0;
LOC6 = (Tctypekind529007)0;
LOC6 = mapreturntype_533447_839829468((*param0).typ);
LOC5 = !((LOC6 == ((Tctypekind529007) 17)));
if (!(LOC5)) goto LA7;
LOC5 = isinvalidreturntype_533550_839829468((*param0).typ);
LA7: ;
if (!LOC5) goto LA8;
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA8: ;
}
/*
 * assignparam_538994_839829468 — thin wrapper: parameters receive the same
 * debug-frame registration as local variables.
 */
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0) {
localdebuginfo_538449_839829468(p0, s0);
}
/*
 * closuresetup_560158_839829468 — machine-generated (Nim compiler C backend).
 * For procs whose type carries flag bit 11 (presumably "is a closure" —
 * verify upstream): finds the environment symbol as the last son of
 * ast[3], asserts it is a node of kind 3 (internal error otherwise),
 * declares it as a local variable and emits an init statement (template
 * T839829468_212 with the env's rdloc and its C type descriptor) into
 * proc section 2. Returns immediately for non-flagged procs.
 */
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0) {
Tnode292802* ls0;
Tnode292802* LOC5;
Tsym292834* env0;
TY532811 LOC10;
{ {
if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag292431) 11))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
LOC5 = (Tnode292802*)0;
LOC5 = HEX5BHEX5D_293238_850551059((*prc0).ast, ((NI) 3));
ls0 = lastson_295364_850551059(LOC5);
{
if (!!(((*ls0).kind == ((Tnodekind292020) 3)))) goto LA8;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211));
}
LA8: ;
env0 = (*ls0).kindU.S4.sym;
assignlocalvar_538614_839829468(p0, env0);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&(*env0).loc));
LOC10[1] = gettypedesc_535673_839829468((*p0).module, (*env0).typ);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2);
}BeforeRet: ;
}
/*
 * initgcframe_538435_839829468 — machine-generated (Nim compiler C backend).
 * Returns the GC-frame initialization rope for proc p0, formatted from
 * template T839829468_217 with p0->gcframetype as its single argument —
 * but only when p0->gcframeid > 0 (the proc actually has GC'd locals);
 * otherwise returns NULL (empty rope).
 */
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY178507 LOC5;
if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).gcframetype;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_217), LOC5, 1);
}
LA3: ;
return result0;
}
/*
 * initframe_560140_839829468 — machine-generated (Nim compiler C backend).
 * Builds the stack-trace frame initialization rope for proc p0.
 * Always pulls compilerproc T839829468_218 into scope via cgsym (result of
 * that call is discarded; the side effect of declaring it is what matters).
 * Then:
 *  - if p0->maxframelen > 0: also pulls compilerproc T839829468_219 and
 *    formats template T839829468_220 with (procname, filename, maxframelen,
 *    framelen of block 0) — the variant with local-variable slots;
 *  - else: formats template T839829468_221 with just (procname, filename).
 */
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218));
{
Ropeobj178006* LOC6;
TY535235 LOC7;
if (!(((NI) 0) < (*p0).maxframelen)) goto LA4;
LOC6 = (Ropeobj178006*)0;
LOC6 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = procname0;
LOC7[1] = filename0;
LOC7[2] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC7[3] = rope_178401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen)));
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4);
}
goto LA2;
LA4: ;
{
TY532811 LOC9;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = procname0;
LOC9[1] = filename0;
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2);
}
LA2: ;
return result0;
}
/*
 * appcg_532648_839829468 — machine-generated (Nim compiler C backend).
 * Formats frmt0 with args via ropecg (which may also pull in compilerprocs
 * through p0->module) and appends the result to proc section s0.
 */
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** LOC1;
Ropeobj178006* LOC2;
LOC1 = (Ropeobj178006**)0;
LOC1 = s_529179_3723162438(p0, s0);
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
add_178482_2381377266(LOC1, LOC2);
}
/*
 * deinitgcframe_538441_839829468 — machine-generated (Nim compiler C backend).
 * Counterpart of initgcframe: returns the GC-frame teardown rope (template
 * T839829468_225, zero args) when p0->gcframeid > 0, otherwise NULL.
 */
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY533289 LOC5;
if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), LOC5, 0);
}
LA3: ;
return result0;
}
/*
 * deinitframe_560150_839829468 — machine-generated (Nim compiler C backend).
 * Counterpart of initframe: unconditionally returns the stack-trace frame
 * teardown rope (template T839829468_226, zero args).
 */
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
TY533289 LOC1;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), LOC1, 0);
return result0;
}
/*
 * genprocaux_560284_839829468 — machine-generated (Nim compiler C backend).
 * Generates the full C body for proc prc0 into module m0 and appends it to
 * module section 10. High-level flow, as visible below:
 *  1. Create a fresh Tcproc context and render the C prototype header.
 *  2. If the proc has a return value (no sym flag 9, sons[0] != nil):
 *     - require len(ast) > 7 (internal error otherwise) and take the
 *       `result` symbol from ast->sons[7];
 *     - valid return type: either emit a combined decl+assignment when an
 *       "easy result assignment" is extractable from the body (template
 *       T839829468_123), or declare + default-init the result local; the
 *       final `return` statement rope uses template T839829468_209;
 *     - invalid return type: fill the result loc as an (out-)parameter
 *       via fillresult + assignparam, resetting storage for type kind 16.
 *  3. Register every run-time parameter (skipping compile-time-only ones)
 *     for debug info; run closure env setup; generate the body statements.
 *  4. Assemble the final text: for flag-14 procs the header may get prefix
 *     T839829468_213 (compiler-capability gated); flag-9 procs use the
 *     4-part template T839829468_215 (header + sections 0/1/2); otherwise
 *     the long path interleaves GC-frame init/deinit, optional stack-trace
 *     frame init/deinit (option bit 15), an optional profiler/extra hook
 *     (option bit 19, template T839829468_222), a BeforeRet label pair
 *     (T839829468_223/T839829468_224) when p0->beforeretneeded, the return
 *     statement, and the closing brace template T839829468_227.
 * NOTE(review): the exact meaning of flag/option bit numbers is defined in
 * the Nim compiler sources — the comments above describe only what the
 * generated control flow shows.
 */
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Tcproc529021* p0;
Ropeobj178006* header0;
Ropeobj178006* returnstmt0;
Tnode292802* LOC51;
Ropeobj178006* generatedproc0;
p0 = newproc_529206_3723162438(prc0, m0);
header0 = genprocheader_535867_839829468(m0, prc0);
returnstmt0 = NIM_NIL;
{
NIM_BOOL LOC3;
Tsym292834* res0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
if (!(LOC3)) goto LA4;
LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL));
LA4: ;
if (!LOC3) goto LA5;
{
NI LOC9;
LOC9 = (NI)0;
LOC9 = len_293081_850551059((*prc0).ast);
if (!(LOC9 <= ((NI) 7))) goto LA10;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120));
}
LA10: ;
res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym;
{
NIM_BOOL LOC14;
TY178507 LOC34;
LOC14 = (NIM_BOOL)0;
LOC14 = isinvalidreturntype_533550_839829468((*(*prc0).typ).sons->data[((NI) 0)]);
if (!!(LOC14)) goto LA15;
{
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA19;
(*res0).flags |= ((NU32)1)<<((((Tsymflag292184) 12))%(sizeof(NU32)*8));
}
LA19: ;
{
NIM_BOOL LOC23;
NIM_BOOL LOC24;
NIM_BOOL LOC26;
Tnode292802* val0;
Tnode292802* LOC29;
Ropeobj178006* decl0;
Tloc292816 a0;
TY532811 LOC32;
LOC23 = (NIM_BOOL)0;
LOC24 = (NIM_BOOL)0;
LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
if (!(LOC24)) goto LA25;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
LOC24 = LOC26;
LA25: ;
LOC23 = LOC24;
if (!(LOC23)) goto LA28;
LOC29 = (Tnode292802*)0;
LOC29 = getbody_335226_1724185294(prc0);
val0 = easyresultasgn_560191_839829468(LOC29);
LOC23 = !((val0 == NIM_NIL));
LA28: ;
if (!LOC23) goto LA30;
decl0 = localvardecl_538532_839829468(p0, res0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexprsingleuse_539289_839829468(p0, val0, (&a0));
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = decl0;
LOC32[1] = rdloc_538188_839829468((&a0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2);
}
goto LA21;
LA30: ;
{
assignlocalvar_538614_839829468(p0, res0);
initlocalvar_538398_839829468(p0, res0, NIM_FALSE);
}
LA21: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((&(*res0).loc));
returnstmt0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1);
}
goto LA12;
LA15: ;
{
fillresult_533865_839829468(res0);
assignparam_538994_839829468(p0, res0);
{
Ttype292840* LOC38;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059((*res0).typ, IL64(211106232576256));
if (!((*LOC38).kind == ((Ttypekind292244) 16))) goto LA39;
(*res0).loc.s = ((Tstorageloc292812) 0);
}
LA39: ;
}
LA12: ;
}
LA5: ;
{
NI i_560627_839829468;
NI HEX3Atmp_560743_839829468;
NI LOC42;
NI res_560746_839829468;
i_560627_839829468 = (NI)0;
HEX3Atmp_560743_839829468 = (NI)0;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059((*(*prc0).typ).n);
HEX3Atmp_560743_839829468 = (NI)(LOC42 - ((NI) 1));
res_560746_839829468 = ((NI) 1);
{
while (1) {
if (!(res_560746_839829468 <= HEX3Atmp_560743_839829468)) goto LA44;
i_560627_839829468 = res_560746_839829468;
{
Tsym292834* param0;
param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_560627_839829468]).kindU.S4.sym;
{
NIM_BOOL LOC48;
LOC48 = (NIM_BOOL)0;
LOC48 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC48) goto LA49;
goto LA45;
}
LA49: ;
assignparam_538994_839829468(p0, param0);
} LA45: ;
res_560746_839829468 += ((NI) 1);
} LA44: ;
}
}
closuresetup_560158_839829468(p0, prc0);
LOC51 = (Tnode292802*)0;
LOC51 = getbody_335226_1724185294(prc0);
genstmts_539244_839829468(p0, LOC51);
generatedproc0 = (Ropeobj178006*)0;
{
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0)) goto LA54;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA58;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA58: ;
}
LA54: ;
{
TY535235 LOC68;
Ropeobj178006** LOC69;
Ropeobj178006** LOC70;
Ropeobj178006** LOC71;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA62;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA66;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_214), header0);
}
LA66: ;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = header0;
LOC69 = (Ropeobj178006**)0;
LOC69 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC68[1] = (*LOC69);
LOC70 = (Ropeobj178006**)0;
LOC70 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC68[2] = (*LOC70);
LOC71 = (Ropeobj178006**)0;
LOC71 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC68[3] = (*LOC71);
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4);
}
goto LA60;
LA62: ;
{
TY178507 LOC73;
Ropeobj178006* LOC74;
Ropeobj178006** LOC93;
Ropeobj178006** LOC94;
Ropeobj178006* LOC101;
TY533289 LOC107;
Ropeobj178006* LOC108;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = header0;
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1);
LOC74 = (Ropeobj178006*)0;
LOC74 = initgcframe_538435_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC74);
{
Ropeobj178006** LOC79;
Ropeobj178006* procname0;
Ropeobj178006* LOC80;
Ropeobj178006* LOC81;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA77;
LOC79 = (Ropeobj178006**)0;
LOC79 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC79));
procname0 = makecstring_191638_155036129((*(*prc0).name).s);
LOC80 = (Ropeobj178006*)0;
LOC80 = quotedfilename_196818_155036129((*prc0).info);
LOC81 = (Ropeobj178006*)0;
LOC81 = initframe_560140_839829468(p0, procname0, LOC80);
add_178482_2381377266(&generatedproc0, LOC81);
}
goto LA75;
LA77: ;
{
Ropeobj178006** LOC83;
LOC83 = (Ropeobj178006**)0;
LOC83 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC83));
}
LA75: ;
{
TY533289 LOC88;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA86;
memset((void*)LOC88, 0, sizeof(LOC88));
appcg_532648_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0);
}
LA86: ;
{
if (!(*p0).beforeretneeded) goto LA91;
add_178487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223));
}
LA91: ;
LOC93 = (Ropeobj178006**)0;
LOC93 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&generatedproc0, (*LOC93));
LOC94 = (Ropeobj178006**)0;
LOC94 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&generatedproc0, (*LOC94));
{
TY533289 LOC99;
Ropeobj178006* LOC100;
if (!(*p0).beforeretneeded) goto LA97;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC100 = (Ropeobj178006*)0;
LOC100 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0);
add_178482_2381377266(&generatedproc0, LOC100);
}
LA97: ;
LOC101 = (Ropeobj178006*)0;
LOC101 = deinitgcframe_538441_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC101);
{
Ropeobj178006* LOC106;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA104;
LOC106 = (Ropeobj178006*)0;
LOC106 = deinitframe_560150_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC106);
}
LA104: ;
add_178482_2381377266(&generatedproc0, returnstmt0);
memset((void*)LOC107, 0, sizeof(LOC107));
LOC108 = (Ropeobj178006*)0;
LOC108 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0);
add_178482_2381377266(&generatedproc0, LOC108);
}
LA60: ;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
}
/*
 * findpendingmodule_532241_839829468 — machine-generated (Nim compiler C backend).
 * Maps symbol s0 to the Tcgen context of its defining module by indexing
 * the global module list with the module symbol's position field.
 */
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Tcgen529027* result0;
Tsym292834* ms0;
result0 = (Tcgen529027*)0;
ms0 = getmodule_299123_2984716966(s0);
result0 = gmodules_529170_3723162438->data[(*ms0).position];
return result0;
}
/*
 * isgetprocaddr_559443_839829468 — machine-generated (Nim compiler C backend).
 * True when the dynamic library's `path` node is a call-like node (one of
 * the seven Tnodekind values 26..32), has a non-nil type, and that type's
 * kind is inside bitmask 100663296 (two specific type kinds) — i.e. the
 * "dynlib" clause is itself a GetProcAddr-style call expression rather
 * than a plain library name.
 */
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0) {
NIM_BOOL result0;
Tnode292802* n0;
NIM_BOOL LOC1;
NIM_BOOL LOC2;
result0 = (NIM_BOOL)0;
n0 = (*lib0).path;
LOC1 = (NIM_BOOL)0;
LOC2 = (NIM_BOOL)0;
LOC2 = ((*n0).kind == ((Tnodekind292020) 27) || (*n0).kind == ((Tnodekind292020) 29) || (*n0).kind == ((Tnodekind292020) 30) || (*n0).kind == ((Tnodekind292020) 31) || (*n0).kind == ((Tnodekind292020) 26) || (*n0).kind == ((Tnodekind292020) 28) || (*n0).kind == ((Tnodekind292020) 32));
if (!(LOC2)) goto LA3;
LOC2 = !(((*n0).typ == NIM_NIL));
LA3: ;
LOC1 = LOC2;
if (!(LOC1)) goto LA4;
LOC1 = ((100663296 &((NU64)1<<((NU)((*(*n0).typ).kind)&63U)))!=0);
LA4: ;
result0 = LOC1;
return result0;
}
/*
 * initlocexpr_539283_839829468 — machine-generated (Nim compiler C backend).
 * Initializes `result0` as a fresh location (kind 0, storage 0) typed from
 * expression e0, then generates the code that evaluates e0 into it.
 */
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
expr_539248_839829468(p0, e0, result0);
}
/*
 * loaddynamiclib_559481_839829468 — machine-generated (Nim compiler C backend).
 * Emits module-level code that loads the dynamic library lib0, exactly once
 * (guarded by lib0->generated). A fresh temp name becomes lib0->name and
 * its handle declaration (template T839829468_228) goes to module section 9.
 * Two cases on the `path` node kind:
 *  - string literal (node kind 20..22): expand libcandidates over the
 *    string, emit a hint message (msg kind 286), chain a try-load per
 *    candidate (template T839829468_230, joined with T839829468_229), and
 *    wrap the chain in the load-or-fail template T839829468_231 appended to
 *    section 16;
 *  - arbitrary expression: evaluate it in a throwaway proc context (with
 *    the 163840 option bits cleared so no debug-frame code is emitted for
 *    it), splice that context's sections into sections 9/16, and emit the
 *    handle assignment via template T839829468_232.
 * Ends with a sanity check: internal error T839829468_233 if lib0->name is
 * still nil.
 */
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0) {
{
Ropeobj178006* tmp0;
TY178507 LOC5;
if (!!((*lib0).generated)) goto LA3;
(*lib0).generated = NIM_TRUE;
tmp0 = gettempname_533598_839829468(m0);
asgnRefNoCycle((void**) (&(*lib0).name), tmp0);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1);
{
TY135002* s0;
Ropeobj178006* loadlib0;
TY532811 LOC18;
if (!((*(*lib0).path).kind >= ((Tnodekind292020) 20) && (*(*lib0).path).kind <= ((Tnodekind292020) 22))) goto LA8;
s0 = (TY135002*) newSeq((&NTI135002), 0);
libcandidates_170605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0));
rawmessage_194612_155036129(((Tmsgkind191002) 286), (*(*lib0).path).kindU.S3.strval);
loadlib0 = NIM_NIL;
{
NI i_559847_839829468;
NI HEX3Atmp_559902_839829468;
NI res_559905_839829468;
i_559847_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (s0 ? (s0->Sup.len-1) : -1);
res_559905_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC17;
if (!(res_559905_839829468 <= HEX3Atmp_559902_839829468)) goto LA12;
i_559847_839829468 = res_559905_839829468;
(*m0).labels += ((NI) 1);
{
if (!(((NI) 0) < i_559847_839829468)) goto LA15;
add_178487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229));
}
LA15: ;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = getstrlit_549468_839829468(m0, s0->data[i_559847_839829468]);
appcg_532632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2);
res_559905_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = loadlib0;
LOC18[1] = getstrlit_549468_839829468(m0, (*(*lib0).path).kindU.S3.strval);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2);
}
goto LA6;
LA8: ;
{
Tcproc529021* p0;
Tloc292816 dest0;
Ropeobj178006** LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
TY532811 LOC23;
p0 = newproc_529206_3723162438(NIM_NIL, m0);
(*p0).options = ((*p0).options & ~ 163840);
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*lib0).path, (&dest0));
LOC20 = (Ropeobj178006**)0;
LOC20 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], (*LOC20));
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC21));
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC22));
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = tmp0;
LOC23[1] = rdloc_538188_839829468((&dest0));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2);
}
LA6: ;
}
LA3: ;
{
if (!((*lib0).name == NIM_NIL)) goto LA26;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_233));
}
LA26: ;
}
/*
 * mangledynlibproc_538816_839829468 — machine-generated (Nim compiler C backend).
 * Produces the C identifier under which a dynlib-imported symbol is stored:
 *  - if symbol flag bit 16 is set, use the symbol's plain name verbatim;
 *  - otherwise format template T839829468_234 with the symbol's unique id,
 *    yielding a compiler-generated name.
 */
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 16))&31U)))!=0)) goto LA3;
result0 = rope_178277_2381377266((*(*sym0).name).s);
}
goto LA1;
LA3: ;
{
TY178507 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266(((NI64) ((*sym0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_234), LOC6, 1);
}
LA1: ;
return result0;
}
/*
 * symindynamiclib_559929_839829468 — machine-generated (Nim compiler C backend).
 * Generates loading code for a symbol imported from a dynamic library.
 * The symbol's loc.r is replaced by a mangled dynlib variable name, and one
 * of two load strategies is emitted:
 *  - GetProcAddr-style (isgetprocaddr true): the library clause is itself a
 *    call; its callee (path[0]) and arguments (path[1..len-2]) are evaluated
 *    in the module's init proc, a call string is assembled and the load
 *    statement (template T839829468_235) is dispatched by the *last* son of
 *    the call, which must be a string-literal node (kind 20, internal error
 *    otherwise): empty string -> emit into initproc section 2; a single
 *    digit '0'..'9' -> append to the matching extensionloaders slot;
 *    anything else -> internal error T839829468_237;
 *  - plain dynlib: ensure the library is loaded (loaddynamiclib) and emit
 *    template T839829468_238 (handle lookup by external name) into module
 *    section 16.
 * Finally the extern-style declaration of the symbol variable (template
 * T839829468_239) is appended to module section 9.
 *
 * FIX: the two `add_*` calls inside the argument loop read `&params0`;
 * the previous revision of this file contained the HTML-entity-corrupted
 * token `¶ms0` (i.e. `&para` rendered as `¶`), which is not valid C.
 */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
NIM_BOOL iscall0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY532811 LOC43;
lib0 = (*sym0).annex;
iscall0 = isgetprocaddr_559443_839829468(lib0);
extname0 = (*sym0).loc.r;
{
if (!!(iscall0)) goto LA3;
loaddynamiclib_559481_839829468(m0, lib0);
}
LA3: ;
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
(*m0).labels += ((NI) 2);
{
Tnode292802* n0;
Tloc292816 a0;
Tnode292802* LOC9;
Ropeobj178006* params0;
Ropeobj178006* LOC10;
Ropeobj178006* load0;
TY535235 LOC17;
NimStringDesc* LOC18;
Tnode292802* last0;
NimStringDesc* idx0;
if (!iscall0) goto LA7;
n0 = (*lib0).path;
memset((void*)(&a0), 0, sizeof(a0));
LOC9 = (Tnode292802*)0;
LOC9 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
initlocexpr_539283_839829468((*m0).initproc, LOC9, (&a0));
LOC10 = (Ropeobj178006*)0;
LOC10 = rdloc_538188_839829468((&a0));
params0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118));
{
NI i_559964_839829468;
NI HEX3Atmp_560025_839829468;
NI LOC12;
NI res_560028_839829468;
i_559964_839829468 = (NI)0;
HEX3Atmp_560025_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = len_293081_850551059(n0);
HEX3Atmp_560025_839829468 = (NI)(LOC12 - ((NI) 2));
res_560028_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* LOC15;
Ropeobj178006* LOC16;
if (!(res_560028_839829468 <= HEX3Atmp_560025_839829468)) goto LA14;
i_559964_839829468 = res_560028_839829468;
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(n0, i_559964_839829468);
initlocexpr_539283_839829468((*m0).initproc, LOC15, (&a0));
LOC16 = (Ropeobj178006*)0;
LOC16 = rdloc_538188_839829468((&a0));
/* was corrupted to `¶ms0` — restore `&params0` */
add_178482_2381377266(&params0, LOC16);
add_178487_2381377266(&params0, ((NimStringDesc*) &T839829468_110));
res_560028_839829468 += ((NI) 1);
} LA14: ;
}
}
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC17[2] = params0;
LOC18 = (NimStringDesc*)0;
LOC18 = HEX24_178856_2381377266(extname0);
LOC17[3] = makecstring_191638_155036129(LOC18);
load0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4);
last0 = lastson_295364_850551059(n0);
{
if (!((*last0).kind == ((Tnodekind292020) 58))) goto LA21;
last0 = (*last0).kindU.S6.sons->data[((NI) 1)];
}
LA21: ;
{
NimStringDesc* LOC27;
if (!!(((*last0).kind == ((Tnodekind292020) 20)))) goto LA25;
LOC27 = (NimStringDesc*)0;
LOC27 = HEX24_196185_1689653243(T839829468_236);
internalerror_196113_155036129(LOC27);
}
LA25: ;
idx0 = (*last0).kindU.S3.strval;
{
Ropeobj178006** LOC32;
if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC32, load0);
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC34;
LOC34 = (NIM_BOOL)0;
LOC34 = ((idx0 ? idx0->Sup.len : 0) == ((NI) 1));
if (!(LOC34)) goto LA35;
LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57)));
LA35: ;
if (!LOC34) goto LA36;
add_178482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0);
}
goto LA28;
LA36: ;
{
NimStringDesc* LOC39;
LOC39 = (NimStringDesc*)0;
LOC39 = rawNewString(idx0->Sup.len + 13);
appendString(LOC39, ((NimStringDesc*) &T839829468_237));
appendString(LOC39, idx0);
internalerror_196100_155036129((*sym0).info, LOC39);
}
LA28: ;
}
goto LA5;
LA7: ;
{
TY535235 LOC41;
NimStringDesc* LOC42;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC41[2] = (*lib0).name;
LOC42 = (NimStringDesc*)0;
LOC42 = HEX24_178856_2381377266(extname0);
LOC41[3] = makecstring_191638_155036129(LOC42);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4);
}
LA5: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*sym0).loc.r;
LOC43[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2);
}
/*
 * symindynamiclibpartial_560071_839829468 — machine-generated (Nim compiler
 * C backend). Partial variant of symindynamiclib: only renames the symbol's
 * loc.r to the mangled dynlib name and clears its type's sym back-pointer;
 * emits no loading code (presumably the owning module emits it — verify).
 */
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_538816_839829468(sym0));
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
}
/*
 * genprocnoforward_560906_839829468 — machine-generated (Nim compiler C backend).
 * Generates proc prc0 without forward-declaration handling. Flow:
 *  - fill the proc's location and emit any required header include;
 *  - loc flag bit 7: the proc comes from a C header — just cgsym its name
 *    (declares it) and return;
 *  - otherwise emit the prototype, then dispatch:
 *    * loc flag bit 3 set: nothing more to generate (imported);
 *    * calling convention 5 (inline, by the duplicated-generation pattern):
 *      generate the body into THIS module unless already declared here;
 *    * loc flag bit 4 (dynlib import): resolve the owning module; if found
 *      and the symbol not yet declared there, emit full dynlib loading in
 *      that module, else only the partial rename in m0;
 *    * default (when sym flag bit 5 is clear): generate the body into the
 *      owning (pending) module if it hasn't declared the symbol yet.
 */
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ fillprocloc_539201_839829468(prc0);
useheader_532369_839829468(m0, prc0);
{
Ropeobj178006* LOC5;
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 7))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = cgsym_532403_839829468(m0, (*(*prc0).name).s);
goto BeforeRet;
}
LA3: ;
genprocprototype_539254_839829468(m0, prc0);
{
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
}
goto LA6;
LA8: ;
{
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id);
if (!!(LOC15)) goto LA16;
genprocaux_560284_839829468(m0, prc0);
}
LA16: ;
}
goto LA6;
LA11: ;
{
Tcgen529027* q0;
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA19;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC23;
NIM_BOOL LOC25;
LOC23 = (NIM_BOOL)0;
LOC23 = !((q0 == NIM_NIL));
if (!(LOC23)) goto LA24;
LOC25 = (NIM_BOOL)0;
LOC25 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC23 = !(LOC25);
LA24: ;
if (!LOC23) goto LA26;
symindynamiclib_559929_839829468(q0, prc0);
}
goto LA21;
LA26: ;
{
symindynamiclibpartial_560071_839829468(m0, prc0);
}
LA21: ;
}
goto LA6;
LA19: ;
{
Tcgen529027* q0;
if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0))) goto LA30;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC36;
LOC34 = (NIM_BOOL)0;
LOC34 = !((q0 == NIM_NIL));
if (!(LOC34)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC34 = !(LOC36);
LA35: ;
if (!LOC34) goto LA37;
genprocaux_560284_839829468(q0, prc0);
}
LA37: ;
}
goto LA6;
LA30: ;
LA6: ;
}BeforeRet: ;
}
/*
 * genproc_532951_839829468 — machine-generated (Nim compiler C backend).
 * Entry point for generating a proc. Skips symbols with flag bit 26 set or
 * that are not "activated". For forward declarations (flag bit 4) only
 * registers the proc as pending; otherwise delegates to genprocnoforward,
 * and additionally — when (flags & 65600) == 64, a generated C header file
 * exists, and loc flag bit 3 is clear — emits the prototype into that
 * header, generating the full body there too for calling convention 5
 * (inline) if the header hasn't declared it yet.
 */
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ {
NIM_BOOL LOC3;
NIM_BOOL LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 26))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isactivated_561431_839829468(prc0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
goto BeforeRet;
}
LA6: ;
fillprocloc_539201_839829468(prc0);
{
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA10;
addforwardedproc_532203_839829468(m0, prc0);
}
goto LA8;
LA10: ;
{
genprocnoforward_560906_839829468(m0, prc0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC16 = ((65600 & (*prc0).flags) == 64);
if (!(LOC16)) goto LA17;
LOC16 = !((generatedheader_532201_839829468 == NIM_NIL));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA18: ;
if (!LOC15) goto LA19;
genprocprototype_539254_839829468(generatedheader_532201_839829468, prc0);
{
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA23;
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = containsorincl_268862_2627731572((&(*generatedheader_532201_839829468).declaredthings), (*prc0).Sup.id);
if (!!(LOC27)) goto LA28;
genprocaux_560284_839829468(generatedheader_532201_839829468, prc0);
}
LA28: ;
}
LA23: ;
}
LA19: ;
}
LA8: ;
}BeforeRet: ;
}
/*
 * emulatedthreadvars_532949_839829468 — machine-generated (Nim compiler
 * C backend). True when ALL bits of mask 71303168 (two option bits) are
 * set in the global options word — i.e. `71303168 & ~g == 0` holds exactly
 * when no masked bit is missing from g. Presumably the "emulate threadvars
 * in software" option combination — verify against the Nim sources.
 */
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = ((71303168 & ~ gglobaloptions_169130_2607990831)==0);
return result0;
}
/*
 * declarethreadvar_538676_839829468 — machine-generated (Nim compiler C backend).
 * Declares thread-local variable s0 in module m0.
 *  - Emulated threadvars: register the symbol once (containsorincl on the
 *    nimtvdeclared set), append its type to the nimtvdeps seq, and add a
 *    field line (template T839829468_54: type + name) to the nimtv record.
 *  - Native threadvars: emit into module section 9 — optional extern
 *    prefix T839829468_240 when isextern0, optional T839829468_241 prefix
 *    when global option bit 22 is set (presumably the native TLS keyword —
 *    verify), then the type descriptor and the name via T839829468_242.
 */
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!LOC3) goto LA4;
{
NIM_BOOL LOC8;
TY532811 LOC11;
LOC8 = (NIM_BOOL)0;
LOC8 = containsorincl_268862_2627731572((&nimtvdeclared_538675_839829468), (*s0).Sup.id);
if (!!(LOC8)) goto LA9;
nimtvdeps_538674_839829468 = (Ttypeseq292836*) incrSeqV2(&(nimtvdeps_538674_839829468)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&nimtvdeps_538674_839829468->data[nimtvdeps_538674_839829468->Sup.len]), (*s0).loc.t);
++nimtvdeps_538674_839829468->Sup.len;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468(m0, (*s0).loc.t);
LOC11[1] = (*s0).loc.r;
addf_179205_2381377266(&nimtv_538656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2);
}
LA9: ;
}
goto LA1;
LA4: ;
{
Ropeobj178006* LOC21;
TY178507 LOC22;
{
if (!isextern0) goto LA15;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
}
LA15: ;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 22))&63U)))!=0)) goto LA19;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_241));
}
LA19: ;
LOC21 = (Ropeobj178006*)0;
LOC21 = gettypedesc_535673_839829468(m0, (*s0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC21);
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1);
}
LA1: ;
}
/*
 * genvarprototypeaux_544254_839829468 — machine-generated (Nim compiler
 * C backend). Emits an extern declaration for global variable sym0 into
 * module m0's section 9. Fills the symbol's loc (kind 3, storage 3, mangled
 * name), then bails out early if loc flag bit 3 is set or the symbol was
 * already declared in this module (containsorincl also marks it declared).
 * Only symbols owned by ANOTHER module are declared here:
 *  - sym flag bit 22 (threadvar): delegate to declarethreadvar(extern);
 *  - otherwise: "extern" prefix T839829468_240, the type descriptor, then
 *    optional modifiers gated on loc flag bit 4 (T839829468_53), sym flag
 *    bit 8 (T839829468_121) and sym flag bit 7 (T839829468_122), and the
 *    name via template T839829468_242.
 */
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Ropeobj178006* LOC1;
{ useheader_532369_839829468(m0, sym0);
LOC1 = (Ropeobj178006*)0;
LOC1 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 3), (*sym0).typ, LOC1, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0);
if (LOC4) goto LA5;
LOC4 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LA5: ;
if (!LOC4) goto LA6;
goto BeforeRet;
}
LA6: ;
{
if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA14;
declarethreadvar_538676_839829468(m0, sym0, NIM_TRUE);
}
goto LA12;
LA14: ;
{
Ropeobj178006* LOC17;
TY178507 LOC30;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
LOC17 = (Ropeobj178006*)0;
LOC17 = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC17);
{
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA20;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_53));
}
LA20: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA24;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_121));
}
LA24: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA28;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_122));
}
LA28: ;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = (*sym0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1);
}
LA12: ;
}
LA10: ;
}BeforeRet: ;
}
/*
 * genvarprototype_539236_839829468 — thin public wrapper around
 * genvarprototypeaux_544254_839829468.
 */
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
genvarprototypeaux_544254_839829468(m0, sym0);
}
/*
 * cgsym_532403_839829468 — machine-generated (Nim compiler C backend).
 * Looks up compilerproc `name0` and makes sure it is declared in module m0,
 * returning its location rope. Dispatch on symbol kind:
 *  - kinds 12/13/15/14 (proc-like): genproc;
 *  - kinds 8/11/9 (var-like): genvarprototype;
 *  - kind 7 (type): just force its type descriptor (result discarded);
 *  - anything else: internal error "cannot generate code for <name>: <kind>".
 * If the compilerproc is unknown, rawmessage with msg kind 68 is raised.
 * NOTE(review): on that unknown path control still reaches
 * `result0 = (*sym0).loc.r` with sym0 == NIM_NIL — safe only if
 * rawmessage_194612 never returns for this message kind; verify upstream.
 */
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0) {
Ropeobj178006* result0;
Tsym292834* sym0;
result0 = (Ropeobj178006*)0;
sym0 = getcompilerproc_338748_3937434831(name0);
{
if (!!((sym0 == NIM_NIL))) goto LA3;
switch ((*sym0).kind) {
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 13):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
genproc_532951_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
genvarprototype_539236_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 7):
{
Ropeobj178006* LOC8;
LOC8 = (Ropeobj178006*)0;
LOC8 = gettypedesc_535673_839829468(m0, (*sym0).typ);
}
break;
default:
{
NimStringDesc* LOC10;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 9);
appendString(LOC10, ((NimStringDesc*) &T839829468_243));
appendString(LOC10, name0);
appendString(LOC10, ((NimStringDesc*) &T839829468_244));
appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI292435)));
internalerror_196113_155036129(LOC10);
}
break;
}
}
goto LA1;
LA3: ;
{
rawmessage_194612_155036129(((Tmsgkind191002) 68), name0);
}
LA1: ;
result0 = (*sym0).loc.r;
return result0;
}
/*
 * ropecg_532407_839829468 — machine-generated (Nim compiler C backend).
 * Mini format-string interpreter producing a rope from frmt0 and args0.
 * Directives recognized while scanning frmt0:
 *  - "$$"        -> literal "$" (rope constant T839829468_19);
 *  - "$#"        -> next sequential argument (num0 counter);
 *  - "$<digits>" -> 1-based argument by index; internal error
 *                   (T839829468_20 prefix) when the index exceeds
 *                   args0Len0;
 *  - "$n"        -> newline rope, but only when option bit 10 is CLEAR
 *                   (conditional line break);
 *  - "$N"        -> unconditional newline rope;
 *  - "$<other>"  -> internal error (unknown directive);
 *  - "#ident"    -> declare compilerproc `ident` via cgsym (recursively)
 *                   and splice in its location rope; ident is
 *                   [A-Za-z_][A-Za-z0-9_]*;
 *  - "#$<digits>"-> take argument <digits> (1-based), render it to a
 *                   string, and cgsym THAT name — an indirect compilerproc
 *                   reference;
 *  - anything else is copied verbatim (the trailing copyStrLast of the
 *    span [start0, i0-1]).
 * NOTE(review): `$#`-after-`$<digits>` resumes from num0 = last explicit
 * index, as the code sets num0 = j0 — intentional per the format spec of
 * the original Nim `ropecg`; verify upstream.
 */
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006* result0;
NI i0;
NI length0;
NI num0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
length0 = (frmt0 ? frmt0->Sup.len : 0);
result0 = NIM_NIL;
num0 = ((NI) 0);
{
while (1) {
NI start0;
if (!(i0 < length0)) goto LA2;
{
if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5;
i0 += ((NI) 1);
switch (((NU8)(frmt0->data[i0]))) {
case 36:
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_19));
i0 += ((NI) 1);
}
break;
case 35:
{
i0 += ((NI) 1);
add_178482_2381377266(&result0, args0[num0]);
num0 += ((NI) 1);
}
break;
case 48 ... 57:
{
NI j0;
j0 = ((NI) 0);
{
while (1) {
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = (length0 <= i0);
if (LOC14) goto LA15;
LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57))));
LA15: ;
if (!LOC14) goto LA16;
goto LA10;
}
LA16: ;
}
} LA10: ;
num0 = j0;
{
NimStringDesc* LOC22;
NimStringDesc* LOC23;
if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20;
LOC22 = (NimStringDesc*)0;
LOC23 = (NimStringDesc*)0;
LOC23 = nimIntToStr(j0);
LOC22 = rawNewString(LOC23->Sup.len + 30);
appendString(LOC22, ((NimStringDesc*) &T839829468_20));
appendString(LOC22, LOC23);
internalerror_196113_155036129(LOC22);
}
LA20: ;
add_178482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]);
}
break;
case 110:
{
{
if (!!(((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0))) goto LA27;
add_178482_2381377266(&result0, rnl_178903_2381377266);
}
LA27: ;
i0 += ((NI) 1);
}
break;
case 78:
{
add_178482_2381377266(&result0, rnl_178903_2381377266);
i0 += ((NI) 1);
}
break;
default:
{
NimStringDesc* LOC31;
LOC31 = (NimStringDesc*)0;
LOC31 = rawNewString(31);
appendString(LOC31, ((NimStringDesc*) &T839829468_20));
appendChar(LOC31, frmt0->data[i0]);
internalerror_196113_155036129(LOC31);
}
break;
}
}
goto LA3;
LA5: ;
{
NIM_BOOL LOC33;
NI j0;
NimStringDesc* ident0;
Ropeobj178006* LOC39;
LOC33 = (NIM_BOOL)0;
LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC33)) goto LA34;
LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95)));
LA34: ;
if (!LOC33) goto LA35;
i0 += ((NI) 1);
j0 = i0;
{
while (1) {
if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38;
j0 += ((NI) 1);
} LA38: ;
}
ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1)));
i0 = j0;
LOC39 = (Ropeobj178006*)0;
LOC39 = cgsym_532403_839829468(m0, ident0);
add_178482_2381377266(&result0, LOC39);
}
goto LA3;
LA35: ;
{
NIM_BOOL LOC41;
NI j0;
NimStringDesc* LOC47;
Ropeobj178006* LOC48;
LOC41 = (NIM_BOOL)0;
LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC41)) goto LA42;
LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36));
LA42: ;
if (!LOC41) goto LA43;
i0 += ((NI) 2);
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46;
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
} LA46: ;
}
LOC47 = (NimStringDesc*)0;
LOC47 = HEX24_178856_2381377266(args0[(NI)(j0 - ((NI) 1))]);
LOC48 = (Ropeobj178006*)0;
LOC48 = cgsym_532403_839829468(m0, LOC47);
add_178482_2381377266(&result0, LOC48);
}
goto LA3;
LA43: ;
LA3: ;
start0 = i0;
{
while (1) {
if (!(i0 < length0)) goto LA50;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36)));
if (!(LOC53)) goto LA54;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(35)));
LA54: ;
if (!LOC53) goto LA55;
i0 += ((NI) 1);
}
goto LA51;
LA55: ;
{
goto LA49;
}
LA51: ;
} LA50: ;
} LA49: ;
{
NimStringDesc* LOC62;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60;
LOC62 = (NimStringDesc*)0;
LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC62);
}
LA60: ;
} LA2: ;
}
return result0;
}
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
/* True when the current module has symbol-flag 27 set while the module that
 * owns `sym0` does not, and the active command is not command 2.
 * (Generated from the Nim compiler; the flag/command ordinals presumably map
 * to sfCompileToCpp / cmdCompileToCpp — confirm against the Nim sources.) */
NIM_BOOL crosses;
crosses = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
if (crosses) {
Tsym292834* owner0;
owner0 = getmodule_299123_2984716966(sym0);
crosses = !((((*owner0).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0));
}
if (crosses) {
crosses = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
}
return crosses;
}
/* Emits a C prototype for proc `sym0` into module `m0`'s output sections.
 * Three paths: (a) loc-flag 3 set -> nothing to do; (b) loc-flag 4 set
 * (presumably "imported from a dynlib" -- confirm) -> emit a function-pointer
 * variable declaration (format T..._245) into section 9; (c) otherwise emit a
 * normal prototype (format T..._191) into section 7, decorated with
 * compiler-capability-dependent attribute strings (T..._213/246/247/248).
 * containsorincl both tests AND records the id, so each decl is emitted once. */
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{ useheader_532369_839829468(m0, sym0);
{
/* loc-flag 3: already declared / no prototype needed -> early exit */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA7;
{
NIM_BOOL LOC11;
Tsym292834* LOC12;
NIM_BOOL LOC14;
TY532811 LOC17;
Ropeobj178006* LOC18;
/* only declare if the symbol lives in another module and was not
 * already declared in this one (containsorincl also marks it) */
LOC11 = (NIM_BOOL)0;
LOC12 = (Tsym292834*)0;
LOC12 = getmodule_299123_2984716966(sym0);
LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id));
if (!(LOC11)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC11 = !(LOC14);
LA13: ;
if (!LOC11) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
LOC17[1] = mangledynlibproc_538816_839829468(sym0);
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC18);
}
LA15: ;
}
goto LA5;
LA7: ;
{
NIM_BOOL LOC20;
Ropeobj178006* header0;
TY178507 LOC47;
Ropeobj178006* LOC48;
/* skip if a prototype was already emitted for this id */
LOC20 = (NIM_BOOL)0;
LOC20 = containsorincl_268862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id);
if (!!(LOC20)) goto LA21;
header0 = genprocheader_535867_839829468(m0, sym0);
{
NIM_BOOL LOC25;
/* sym-flag 14 + C-compiler capability 6: prefix attribute T..._213 */
LOC25 = (NIM_BOOL)0;
LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC25)) goto LA26;
LOC25 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0);
LA26: ;
if (!LOC25) goto LA27;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA27: ;
{
NIM_BOOL LOC31;
/* non-callconv-5 procs crossing a C/C++ module boundary get prefix T..._246 */
LOC31 = (NIM_BOOL)0;
LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention292002) 5)));
if (!(LOC31)) goto LA32;
LOC31 = crossescppboundary_560754_839829468(m0, sym0);
LA32: ;
if (!LOC31) goto LA33;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_246), header0);
}
LA33: ;
{
NIM_BOOL LOC37;
/* sym-flag 9 + capability 7: append suffix T..._247 */
LOC37 = (NIM_BOOL)0;
LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
if (!(LOC37)) goto LA38;
LOC37 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA38: ;
if (!LOC37) goto LA39;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_247));
}
LA39: ;
{
NIM_BOOL LOC43;
/* sym-flag 14 + capability 7: append suffix T..._248 */
LOC43 = (NIM_BOOL)0;
LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC43)) goto LA44;
LOC43 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA44: ;
if (!LOC43) goto LA45;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_248));
}
LA45: ;
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = header0;
LOC48 = (Ropeobj178006*)0;
LOC48 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], LOC48);
}
goto LA5;
LA21: ;
LA5: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void) {
/* The selected GC counts as "native" when its mode ordinal is >= 5. */
return (((Tgcmode169080) 5) <= gselectedgc_169133_2607990831);
}
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
/* Ref assignment. Plain C assignment (format T..._123) when the destination
 * has storage kind 2 or no native GC is active; a barrier call taking the
 * destination's address otherwise: storage kind 3 uses the cycle-aware form
 * (T..._249) when the type can form a cycle, else T..._250; any other storage
 * uses T..._251. Storage ordinals presumably map to Nim's TStorageLoc
 * (2 ~ on-stack, 3 ~ on-heap) -- confirm. `flags0` is accepted but unused. */
NIM_BOOL plainasgn;
plainasgn = ((*dest0).s == ((Tstorageloc292812) 2));
if (!(plainasgn)) {
plainasgn = !(usesnativegc_169177_2607990831());
}
if (plainasgn) {
TY532811 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rdloc_538188_839829468(dest0);
fmtargs[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), fmtargs, 2);
} else if ((*dest0).s == ((Tstorageloc292812) 3)) {
if (canformacycle_320123_3876443242((*dest0).t)) {
TY532811 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = addrloc_538204_839829468(dest0);
fmtargs[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_249), fmtargs, 2);
} else {
TY532811 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = addrloc_538204_839829468(dest0);
fmtargs[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_250), fmtargs, 2);
}
} else {
TY532811 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = addrloc_538204_839829468(dest0);
fmtargs[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_251), fmtargs, 2);
}
}
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result) {
/* Builds a field-access loc: same storage as `a0`, type `t0`, and an r-value
 * rope of the form "<rdloc(a0)><T..._257 separator><field0>". */
Ropeobj178006* baserope;
Ropeobj178006* withsep;
(*Result).k = ((Tlockind292808) 5);
(*Result).s = (*a0).s;
unsureAsgnRef((void**) (&(*Result).t), t0);
baserope = rdloc_538188_839829468(a0);
withsep = HEX26_178447_2381377266(baserope, ((NimStringDesc*) &T839829468_257));
unsureAsgnRef((void**) (&(*Result).r), HEX26_178418_2381377266(withsep, field0));
}
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
/* Optimized tuple assignment: assigns each field separately via optasgnloc +
 * genassignment. The copy flag (bit 0) is forced on when the source has
 * storage kind 1 and forced off when the destination type carries
 * type-flag 6; otherwise the caller's flags pass through unchanged. */
Tassignmentflag538302Set newflags0;
Ttype292840* skipped;
Ttype292840* tuptype;
NI fieldcount;
NI idx;
if ((*src0).s == ((Tstorageloc292812) 1)) {
newflags0 = (flags0 | 1);
} else if ((((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) {
newflags0 = (flags0 & ~ 1);
} else {
newflags0 = flags0;
}
skipped = skiptypes_296099_850551059((*dest0).t, IL64(211106232576256));
tuptype = getuniquetype_528640_2036603609(skipped);
fieldcount = len_295339_850551059(tuptype);
for (idx = ((NI) 0); idx <= (fieldcount - 1); idx++) {
Ttype292840* fieldtype;
Ropeobj178006* fieldname;
TY178507 fmtargs;
Tloc292816 destfield;
Tloc292816 srcfield;
fieldtype = (*tuptype).sons->data[idx];
/* field name is rendered from the field index via format T..._260 */
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rope_178401_2381377266(((NI64) (idx)));
fieldname = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_260), fmtargs, 1);
memset((void*)(&destfield), 0, sizeof(destfield));
optasgnloc_549789_839829468(dest0, fieldtype, fieldname, (&destfield));
memset((void*)(&srcfield), 0, sizeof(srcfield));
optasgnloc_549789_839829468(src0, fieldtype, fieldname, (&srcfield));
genassignment_539264_839829468(p0, (&destfield), (&srcfield), newflags0);
}
}
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
/* Generic assignment. A shallow copy is allowed when assignment-flag 0 is
 * absent or the (skipped) destination type carries type-flag 6; in that case
 * stack destinations (storage 2) or non-native-GC builds emit the string.h
 * based copy (T..._261), others the shallow runtime copy via type info
 * (T..._262). Deep copies always go through the type-info form T..._263. */
NIM_BOOL shallowok;
shallowok = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(shallowok)) {
Ttype292840* skipped;
skipped = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
shallowok = (((*skipped).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0);
}
if (shallowok) {
NIM_BOOL plaincopy;
plaincopy = ((*dest0).s == ((Tstorageloc292812) 2));
if (!(plaincopy)) {
plaincopy = !(usesnativegc_169177_2607990831());
}
if (plaincopy) {
TY535238 fmtargs;
usestringh_532345_839829468((*p0).module);
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = addrloc_538204_839829468(dest0);
fmtargs[1] = addrloc_538204_839829468(src0);
fmtargs[2] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), fmtargs, 3);
} else {
TY535238 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = addrloc_538204_839829468(dest0);
fmtargs[1] = addrloc_538204_839829468(src0);
fmtargs[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_262), fmtargs, 3);
}
} else {
TY535238 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = addrloc_538204_839829468(dest0);
fmtargs[1] = addrloc_538204_839829468(src0);
fmtargs[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_263), fmtargs, 3);
}
}
N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0) {
/* Heuristic cost of assigning the structure described by AST node `n0`:
 * node kind 3 (a field symbol) costs 1, kind 139 costs 100, kind 138
 * (a record list) sums its children recursively; everything else is free.
 * A nil node costs 0. */
NI total;
total = (NI)0;
if (!(n0 == NIM_NIL)) {
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
total = ((NI) 1);
break;
case ((Tnodekind292020) 139):
total = ((NI) 100);
break;
case ((Tnodekind292020) 138): {
NI childcount;
NI idx;
childcount = len_293081_850551059(n0);
for (idx = ((NI) 0); idx <= (childcount - 1); idx++) {
total += asgncomplexity_549751_839829468((*n0).kindU.S6.sons->data[idx]);
}
break;
}
default:
break;
}
}
return total;
}
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0) {
/* Optimized object assignment: walks the record description `t0` and emits a
 * per-field genassignment for every field symbol (node kind 3), recursing
 * through record lists (node kind 138). Nil and other node kinds are skipped.
 * Same copy-flag adjustment as genoptasgntuple. */
Tassignmentflag538302Set newflags0;
if (t0 == NIM_NIL) {
return;
}
if ((*src0).s == ((Tstorageloc292812) 1)) {
newflags0 = (flags0 | 1);
} else if ((((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) {
newflags0 = (flags0 & ~ 1);
} else {
newflags0 = flags0;
}
switch ((*t0).kind) {
case ((Tnodekind292020) 3): {
Tsym292834* fieldsym;
Tloc292816 destfield;
Tloc292816 srcfield;
fieldsym = (*t0).kindU.S4.sym;
memset((void*)(&destfield), 0, sizeof(destfield));
optasgnloc_549789_839829468(dest0, (*fieldsym).typ, (*fieldsym).loc.r, (&destfield));
memset((void*)(&srcfield), 0, sizeof(srcfield));
optasgnloc_549789_839829468(src0, (*fieldsym).typ, (*fieldsym).loc.r, (&srcfield));
genassignment_539264_839829468(p0, (&destfield), (&srcfield), newflags0);
break;
}
case ((Tnodekind292020) 138): {
NI childcount;
NI idx;
childcount = len_293081_850551059(t0);
for (idx = ((NI) 0); idx <= (childcount - 1); idx++) {
genoptasgnobject_550084_839829468(p0, dest0, src0, newflags0, (*t0).kindU.S6.sons->data[idx]);
}
break;
}
default:
break;
}
}
/* Central assignment code generator: dispatches on the (skipped) kind of the
 * destination type and emits the matching C code via linefmt format strings.
 * Fast path: a nil source type or source type of kind 21 gets a plain C
 * assignment (T..._123). The type-kind ordinals presumably correspond to
 * Nim's TTypeKind enum (22 ~ tyRef, 24 ~ tySequence, 28 ~ tyString,
 * 25 ~ tyProc, 18 ~ tyTuple, 17 ~ tyObject, 16/4 ~ arrays, 19 ~ tySet)
 * -- inferred from call targets; confirm against this Nim version's ast.nim. */
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
Ttype292840* ty0;
{ {
NIM_BOOL LOC3;
TY532811 LOC7;
/* fast path: nil source type or source kind 21 -> plain "a = b" */
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*src0).t == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = ((*(*src0).t).kind == ((Ttypekind292244) 21));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(dest0);
LOC7[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2);
goto BeforeRet;
}
LA5: ;
ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106233624832));
switch ((*ty0).kind) {
/* kind 22: ref -> write-barrier-aware assignment */
case ((Ttypekind292244) 22):
{
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
break;
/* kind 24: seq-like -- ref assign when no copy is needed, else a deep
 * copy through type info (T..._252) */
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC12)) goto LA13;
LOC12 = !(((*src0).s == ((Tstorageloc292812) 1)));
LA13: ;
if (!LOC12) goto LA14;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA10;
LA14: ;
{
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3);
}
LA10: ;
}
break;
/* kind 28: string -- ref assign when no copy needed; otherwise copy
 * (T..._253), heap-destination variant with a temporary (T..._254/255),
 * or the generic address-based form (T..._256) */
case ((Ttypekind292244) 28):
{
{
NIM_BOOL LOC21;
LOC21 = (NIM_BOOL)0;
LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC21)) goto LA22;
LOC21 = !(((*src0).s == ((Tstorageloc292812) 1)));
LA22: ;
if (!LOC21) goto LA23;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA19;
LA23: ;
{
{
NIM_BOOL LOC28;
NIM_BOOL LOC30;
TY532811 LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC28) goto LA29;
LOC30 = (NIM_BOOL)0;
LOC30 = usesnativegc_169177_2607990831();
LOC28 = !(LOC30);
LA29: ;
if (!LOC28) goto LA31;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468(dest0);
LOC33[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2);
}
goto LA26;
LA31: ;
{
Tloc292816 tmp0;
TY535238 LOC37;
TY178507 LOC38;
if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA35;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, ty0, (&tmp0), NIM_FALSE);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468(dest0);
LOC37[1] = rdloc_538188_839829468(src0);
LOC37[2] = rdloc_538188_839829468((&tmp0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468((&tmp0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1);
}
goto LA26;
LA35: ;
{
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = addrloc_538204_839829468(dest0);
LOC40[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2);
}
LA26: ;
}
LA19: ;
}
break;
/* kind 25: proc -- closures (complex assignment) assign the T..._258
 * component via genrefassign, then the remaining part via T..._259;
 * plain proc pointers are a simple C assignment */
case ((Ttypekind292244) 25):
{
{
NIM_BOOL LOC44;
Tloc292816 a0;
Ropeobj178006* LOC47;
Tloc292816 LOC48;
Tloc292816 b0;
Ropeobj178006* LOC49;
Tloc292816 LOC50;
TY532811 LOC51;
LOC44 = (NIM_BOOL)0;
LOC44 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC44) goto LA45;
memset((void*)(&a0), 0, sizeof(a0));
LOC47 = (Ropeobj178006*)0;
LOC47 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC48), 0, sizeof(LOC48));
optasgnloc_549789_839829468(dest0, (*dest0).t, LOC47, (&LOC48));
memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
LOC49 = (Ropeobj178006*)0;
LOC49 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC50), 0, sizeof(LOC50));
optasgnloc_549789_839829468(src0, (*dest0).t, LOC49, (&LOC50));
memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0));
genrefassign_538311_839829468(p0, (&a0), (&b0), flags0);
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(dest0);
LOC51[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2);
}
goto LA42;
LA45: ;
{
TY532811 LOC53;
memset((void*)LOC53, 0, sizeof(LOC53));
LOC53[0] = rdloc_538188_839829468(dest0);
LOC53[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2);
}
LA42: ;
}
break;
/* kind 18: tuple -- field-by-field for small tuples (<= 4 fields),
 * generic runtime assign otherwise; trivial tuples assign directly */
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC57;
LOC57 = (NIM_BOOL)0;
LOC57 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC57) goto LA58;
{
NI LOC62;
LOC62 = (NI)0;
LOC62 = len_295339_850551059((*dest0).t);
if (!(LOC62 <= ((NI) 4))) goto LA63;
genoptasgntuple_550001_839829468(p0, dest0, src0, flags0);
}
goto LA60;
LA63: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA60: ;
}
goto LA55;
LA58: ;
{
TY532811 LOC67;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC67[0] = rdloc_538188_839829468(dest0);
LOC67[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2);
}
LA55: ;
}
break;
/* kind 17: object -- imported C++ types assign directly; objects with a
 * type field use the generic assign; small case-less objects (complexity
 * <= 4) are assigned field-by-field; the rest fall back appropriately */
case ((Ttypekind292244) 17):
{
{
NIM_BOOL LOC71;
TY532811 LOC74;
LOC71 = (NIM_BOOL)0;
LOC71 = isimportedcpptype_533478_839829468(ty0);
if (!LOC71) goto LA72;
memset((void*)LOC74, 0, sizeof(LOC74));
LOC74[0] = rdloc_538188_839829468(dest0);
LOC74[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2);
}
goto LA69;
LA72: ;
{
NIM_BOOL LOC76;
LOC76 = (NIM_BOOL)0;
LOC76 = isobjlackingtypefield_533515_839829468(ty0);
if (!!(LOC76)) goto LA77;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA69;
LA77: ;
{
NIM_BOOL LOC80;
LOC80 = (NIM_BOOL)0;
LOC80 = needscomplexassignment_533511_839829468(ty0);
if (!LOC80) goto LA81;
{
NIM_BOOL LOC85;
NI LOC87;
Ropeobj178006* LOC90;
LOC85 = (NIM_BOOL)0;
LOC85 = (*ty0).sons->data[((NI) 0)] == 0;
if (!(LOC85)) goto LA86;
LOC87 = (NI)0;
LOC87 = asgncomplexity_549751_839829468((*ty0).n);
LOC85 = (LOC87 <= ((NI) 4));
LA86: ;
if (!LOC85) goto LA88;
LOC90 = (Ropeobj178006*)0;
LOC90 = gettypedesc_535673_839829468((*p0).module, ty0);
ty0 = getuniquetype_528640_2036603609(ty0);
{
NimStringDesc* LOC95;
/* internal error if the unique type has no record description */
if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93;
LOC95 = (NimStringDesc*)0;
LOC95 = HEX24_196185_1689653243(T839829468_264);
internalerror_196113_155036129(LOC95);
}
LA93: ;
genoptasgnobject_550084_839829468(p0, dest0, src0, flags0, (*ty0).n);
}
goto LA83;
LA88: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA83: ;
}
goto LA69;
LA81: ;
{
TY532811 LOC98;
memset((void*)LOC98, 0, sizeof(LOC98));
LOC98[0] = rdloc_538188_839829468(dest0);
LOC98[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2);
}
LA69: ;
}
break;
/* kinds 16 and 4: arrays -- generic assign when complex, else a
 * string.h based copy (T..._261) */
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC102) goto LA103;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA100;
LA103: ;
{
TY535238 LOC106;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC106, 0, sizeof(LOC106));
LOC106[0] = rdloc_538188_839829468(dest0);
LOC106[1] = rdloc_538188_839829468(src0);
LOC106[2] = gettypedesc_535673_839829468((*p0).module, ty0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3);
}
LA100: ;
}
break;
/* kinds 27 and 48: open-array-like -- type-info based assign (T..._266)
 * when complex, else a string.h based copy (T..._267) */
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
{
NIM_BOOL LOC110;
TY535238 LOC113;
LOC110 = (NIM_BOOL)0;
LOC110 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC110) goto LA111;
memset((void*)LOC113, 0, sizeof(LOC113));
LOC113[0] = addrloc_538204_839829468(dest0);
LOC113[1] = addrloc_538204_839829468(src0);
LOC113[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3);
}
goto LA108;
LA111: ;
{
TY532811 LOC115;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC115, 0, sizeof(LOC115));
LOC115[0] = rdloc_538188_839829468(dest0);
LOC115[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2);
}
LA108: ;
}
break;
/* kind 19: set -- big sets (mapped C type kind 17) copy by size via a
 * string.h routine (T..._268); word-sized sets assign directly */
case ((Ttypekind292244) 19):
{
{
Tctypekind529007 LOC119;
TY535238 LOC122;
NI64 LOC123;
LOC119 = (Tctypekind529007)0;
LOC119 = maptype_533394_839829468(ty0);
if (!(LOC119 == ((Tctypekind529007) 17))) goto LA120;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC122, 0, sizeof(LOC122));
LOC122[0] = rdloc_538188_839829468(dest0);
LOC122[1] = rdloc_538188_839829468(src0);
LOC123 = (NI64)0;
LOC123 = getsize_320135_3876443242((*dest0).t);
LOC122[2] = rope_178401_2381377266(LOC123);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3);
}
goto LA117;
LA120: ;
{
TY532811 LOC125;
memset((void*)LOC125, 0, sizeof(LOC125));
LOC125[0] = rdloc_538188_839829468(dest0);
LOC125[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2);
}
LA117: ;
}
break;
/* scalar-ish kinds: plain C assignment */
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
TY532811 LOC127;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rdloc_538188_839829468(dest0);
LOC127[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2);
}
break;
/* any other kind is a compiler bug: report internal error with the kind */
default:
{
NimStringDesc* LOC129;
LOC129 = (NimStringDesc*)0;
LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 15);
appendString(LOC129, ((NimStringDesc*) &T839829468_269));
appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC129);
}
break;
}
}BeforeRet: ;
}
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0) {
/* Copies loc `s0` into destination loc `d0`: an uninitialized destination
 * (kind 0) just takes over the loc record via genericAssign; otherwise a
 * real assignment is generated, with the copy flag (bit 0) set unless the
 * destination carries loc-flag 2. */
if ((*d0).k == ((Tlockind292808) 0)) {
genericAssign((void*)(&(*d0)), (void*)s0, (&NTI292816));
} else {
if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
genassignment_539264_839829468(p0, (&(*d0)), s0, 0);
} else {
genassignment_539264_839829468(p0, (&(*d0)), s0, 1);
}
}
}
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0) {
/* A constant is "simple" when its skipped type kind is outside the complex
 * set (bitmask 17760272 over kind ordinals) and it is not a proc type
 * (kind 25) with calling convention 8 (presumably ccClosure -- confirm). */
Ttype292840* t0;
NIM_BOOL isclosure;
t0 = skiptypes_296099_850551059(typ0, IL64(211106240964864));
if (((17760272 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) {
return NIM_FALSE;
}
isclosure = ((*t0).kind == ((Ttypekind292244) 25));
if (isclosure) {
isclosure = ((*t0).callconv == ((Tcallingconvention292002) 8));
}
return !isclosure;
}
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0) {
/* Stores expression rope `r0` of type `t0` into destination loc `d0`. An
 * uninitialized destination (kind 0) is simply filled in; otherwise a
 * temporary expression loc is built and assigned, with the copy flag set
 * unless loc-flag 2 is present on the destination. */
Tloc292816 exprloc;
memset((void*)(&exprloc), 0, sizeof(exprloc));
if ((*d0).k == ((Tlockind292808) 0)) {
(*d0).k = ((Tlockind292808) 6);
unsureAsgnRef((void**) (&(*d0).t), t0);
unsureAsgnRef((void**) (&(*d0).r), r0);
} else {
initloc_532273_839829468((&exprloc), ((Tlockind292808) 6), t0, s0);
exprloc.r = r0;
if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
genassignment_539264_839829468(p0, (&(*d0)), (&exprloc), 0);
} else {
genassignment_539264_839829468(p0, (&(*d0)), (&exprloc), 1);
}
}
}
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0) {
/* Packs the first `size0` bytes of bitset `s0` into one little-endian
 * 64-bit word; bytes past the bitset's actual length contribute zero
 * (a nil bitset is treated as length 0). */
NI64 word;
NI avail;
NI byteidx;
word = IL64(0);
avail = (s0 ? s0->Sup.len : 0);
for (byteidx = ((NI) 0); byteidx <= (NI)(size0 - ((NI) 1)); byteidx++) {
if (byteidx < avail) {
word = (NI64)(word | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[byteidx]))) << (NU64)(((NI64) ((NI)(byteidx * ((NI) 8)))))));
}
}
return word;
}
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0) {
/* Renders set constant `cs0` as C source. Sets wider than 8 bytes become a
 * byte-array literal (opened by format T..._273, each byte rendered as a
 * 2-digit hex value with separator T..._275, a line break variant T..._274
 * every 8 bytes, and T..._276 closing the list); smaller sets are packed
 * into a single integer literal. */
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
if (((NI) 8) < size0) {
TY533289 noargs;
NI idx;
memset((void*)noargs, 0, sizeof(noargs));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_273), noargs, 0);
for (idx = ((NI) 0); idx <= (NI)(size0 - ((NI) 1)); idx++) {
NimStringDesc* sep;
NimStringDesc* hexbyte;
TY178507 hexarg;
if (idx < (NI)(size0 - ((NI) 1))) {
if (((NI) ((NI)((NI)(idx + ((NI) 1)) % ((NI) 8)))) == ((NI) 0)) {
sep = copyString(((NimStringDesc*) &T839829468_274));
} else {
sep = copyString(((NimStringDesc*) &T839829468_275));
}
} else {
sep = copyString(((NimStringDesc*) &T839829468_276));
}
memset((void*)hexarg, 0, sizeof(hexarg));
hexbyte = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[idx])), ((NI) 2));
hexarg[0] = rope_178277_2381377266(hexbyte);
addf_179205_2381377266(&result0, sep, hexarg, 1);
}
} else {
NI64 packed;
packed = bitsettoword_549578_839829468(cs0, size0);
result0 = intliteral_539270_839829468(packed);
}
return result0;
}
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
/* Formats `frmt0` with `args0` via ropecg and appends the result to
 * section `s0` of module `m0`. */
Ropeobj178006* formatted;
formatted = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
add_178482_2381377266(&(*m0).s[(s0)- 0], formatted);
}
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0) {
/* Emits a constant seq literal: builds the payload (header via T..._277,
 * elements separated by T..._279, braces from T..._278/280), declares a
 * named temporary in file section 8 via format T..._281, and returns the
 * temp cast to the seq type via format T..._282. */
Ropeobj178006* result0;
Ropeobj178006* payload;
NI seqlen;
NI decllen;
TY178507 lenarg;
TY535235 declargs;
TY532811 castargs;
result0 = (Ropeobj178006*)0;
seqlen = len_293081_850551059(n0);
memset((void*)lenarg, 0, sizeof(lenarg));
lenarg[0] = rope_178401_2381377266(((NI64) (seqlen)));
payload = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_277), lenarg, 1);
if (((NI) 0) < seqlen) {
NI idx;
add_178487_2381377266(&payload, ((NimStringDesc*) &T839829468_278));
for (idx = ((NI) 0); idx <= (NI)(seqlen - ((NI) 1)); idx++) {
Ropeobj178006* elem;
if (((NI) 0) < idx) {
TY533289 noargs;
memset((void*)noargs, 0, sizeof(noargs));
addf_179205_2381377266(&payload, ((NimStringDesc*) &T839829468_279), noargs, 0);
}
elem = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[idx]);
add_178482_2381377266(&payload, elem);
}
add_178487_2381377266(&payload, ((NimStringDesc*) &T839829468_280));
}
add_178487_2381377266(&payload, ((NimStringDesc*) &T839829468_280));
result0 = gettempname_533598_839829468((*p0).module);
/* length queried again after element generation, mirroring the compiler's
 * original evaluation order */
decllen = len_293081_850551059(n0);
memset((void*)declargs, 0, sizeof(declargs));
declargs[0] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
declargs[1] = rope_178401_2381377266(((NI64) (decllen)));
declargs[2] = result0;
declargs[3] = payload;
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 8), ((NimStringDesc*) &T839829468_281), declargs, 4);
memset((void*)castargs, 0, sizeof(castargs));
castargs[0] = gettypedesc_535673_839829468((*p0).module, t0);
castargs[1] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_282), castargs, 2);
return result0;
}
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* For node kind 34 (a name/value pair) generate the value child (son 1);
 * any other node is generated as-is. */
Tnode292802* valnode;
valnode = n0;
if ((*n0).kind == ((Tnodekind292020) 34)) {
valnode = (*n0).kindU.S6.sons->data[((NI) 1)];
}
return genconstexpr_554849_839829468(p0, valnode);
}
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Renders a constant aggregate as a brace list: opener T..._223, each
 * non-last element through format T..._283 (element + separator), the last
 * element appended bare, closer via format T..._160. For node kind 38 the
 * first son is skipped (it is presumably the type/tag slot -- confirm). */
Ropeobj178006* result0;
NI total;
NI startidx;
NI idx;
TY533289 noargs;
total = sonslen_295351_850551059(n0);
startidx = ((*n0).kind == ((Tnodekind292020) 38));
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_223));
for (idx = ((NI) (startidx)); idx <= (NI)(total - ((NI) 2)); idx++) {
TY178507 onearg;
memset((void*)onearg, 0, sizeof(onearg));
onearg[0] = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[idx]);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), onearg, 1);
}
if (((NI) (startidx)) < total) {
Ropeobj178006* lastelem;
lastelem = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(total - ((NI) 1))]);
add_178482_2381377266(&result0, lastelem);
}
memset((void*)noargs, 0, sizeof(noargs));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), noargs, 0);
return result0;
}
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Renders a constant expression node as a C rope. Wrapper nodes (kinds
 * 58/59) recurse into son 1; set literals (kind 39) go through the bitset
 * renderer; aggregate literals (kinds 41/37/155/38) become either a seq
 * constant (skipped type kind 24) or a simple brace list; everything else
 * is evaluated as an ordinary expression loc. */
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59): {
result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]);
break;
}
case ((Tnodekind292020) 39): {
Tbitset339004* bits;
NI64 setsize;
bits = (Tbitset339004*)0;
tobitset_340001_452470228(n0, (&bits));
setsize = getsize_320135_3876443242((*n0).typ);
result0 = genrawsetdata_549629_839829468(bits, ((NI) (setsize)));
break;
}
case ((Tnodekind292020) 41):
case ((Tnodekind292020) 37):
case ((Tnodekind292020) 155):
case ((Tnodekind292020) 38): {
Ttype292840* skipped;
skipped = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
if ((*skipped).kind == ((Ttypekind292244) 24)) {
result0 = genconstseq_559371_839829468(p0, n0, skipped);
} else {
result0 = genconstsimplelist_559299_839829468(p0, n0);
}
break;
}
default: {
Tloc292816 tmploc;
memset((void*)(&tmploc), 0, sizeof(tmploc));
initlocexpr_539283_839829468(p0, n0, (&tmploc));
result0 = rdloc_538188_839829468((&tmploc));
break;
}
}
return result0;
}
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0) {
/* Ensures the C definition of constant `sym0` exists. Fills in the
 * symbol's loc on first use, then emits the definition (format T..._272)
 * into the owning pending module `q0` and, when the current module differs,
 * an extern-style declaration (format T..._284) into the current module
 * (and the generated header when sym-flag 6 is set). containsorincl both
 * tests and records the id, so each module declares the symbol only once. */
Tcgen529027* m0;
Tcgen529027* q0;
m0 = (*p0).module;
useheader_532369_839829468(m0, sym0);
if ((*sym0).loc.k == ((Tlockind292808) 0)) {
Ropeobj178006* mangled;
mangled = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 8), (*sym0).typ, mangled, ((Tstorageloc292812) 1));
}
if ((((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) {
return;
}
q0 = findpendingmodule_532241_839829468(m0, sym0);
if (!((q0 == NIM_NIL)) && !(containsorincl_268862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id))) {
TY535238 defargs;
memset((void*)defargs, 0, sizeof(defargs));
defargs[0] = gettypedesc_535673_839829468(q0, (*sym0).typ);
defargs[1] = (*sym0).loc.r;
defargs[2] = genconstexpr_554849_839829468((*q0).initproc, (*sym0).ast);
addf_179205_2381377266(&(*q0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), defargs, 3);
}
if (!((q0 == m0)) && !(containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id))) {
Ropeobj178006* headerdecl0;
TY532811 declargs;
memset((void*)declargs, 0, sizeof(declargs));
declargs[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
declargs[1] = (*sym0).loc.r;
headerdecl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_284), declargs, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
if ((((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0) && !((generatedheader_532201_839829468 == NIM_NIL))) {
add_178482_2381377266(&(*generatedheader_532201_839829468).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
}
}
}
/* Emits the C definition of a "complex" constant on demand, then copies the
 * constant's (now filled-in) location into destination loc `d0`. */
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0) {
requestconstimpl_539240_839829468(p0, sym0);
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
/* Return a pointer to code section s0 of the outermost (index 0) block of
   proc context p0, so callers can append ropes to it in place. */
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
    return &p0->blocks->data[(NI) 0].sections[(s0) - 0];
}
/* Record the first access to a thread-local variable inside proc p0 when
   thread vars are emulated: mark the proc and its module, then emit the
   per-proc setup code (format T..._286 into proc section 0 and T..._287
   into proc section 1). Subsequent calls are no-ops for the same proc.
   NOTE(review): parameter s0 is accepted but never read in this body. */
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{
NIM_BOOL LOC3;
Ropeobj178006** LOC7;
TY533289 LOC8;
Ropeobj178006** LOC9;
TY533289 LOC10;
Ropeobj178006* LOC11;
/* Condition: emulatedThreadVars() AND this proc has not yet accessed one. */
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!(LOC3)) goto LA4;
LOC3 = !((*p0).threadvaraccessed);
LA4: ;
if (!LOC3) goto LA5;
(*p0).threadvaraccessed = NIM_TRUE;
/* Set codegen flag bit 1 on the enclosing module. */
(*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag529025) 1))%(sizeof(NU8)*8));
/* Emit format T..._286 (no args) into proc section 0. */
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 0));
memset((void*)LOC8, 0, sizeof(LOC8));
addf_179205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0);
/* Emit format T..._287 (no args) into proc section 1. */
LOC9 = (Ropeobj178006**)0;
LOC9 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 1));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0);
add_178482_2381377266(LOC9, LOC11);
}
LA5: ;
}
/* True when t0 is nil, or when its kind bit is set in the constant 64-bit
   "empty type kinds" mask IL64(4611686018427388032). */
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0) {
    if (t0 == NIM_NIL) {
        return NIM_TRUE;
    }
    return ((IL64(4611686018427388032) & ((NU64)1 << ((NU)((*t0).kind) & 63U))) != 0);
}
/* Store the generated rope r0 (representing data of type t0) into
   destination loc d0. If d0 is already initialized (k != 0), build a
   temporary loc of kind 8 / storage class 1 around r0 and generate an
   assignment into d0 — flag 0 when d0 carries loc-flag bit 2, flag 1
   otherwise. If d0 is uninitialized, take it over in place: set kind 8 and
   install t0/r0 through GC-safe reference assignment. */
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
/* d0 already has a location: wrap r0 in a temp loc and assign. */
initloc_532273_839829468((&a0), ((Tlockind292808) 8), t0, ((Tstorageloc292812) 1));
a0.r = r0;
{
if (!(((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) goto LA7;
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0);
}
goto LA5;
LA7: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 1);
}
LA5: ;
}
goto LA1;
LA3: ;
{
/* d0 is unset: adopt r0/t0 directly (GC write barriers via unsureAsgnRef). */
(*d0).k = ((Tlockind292808) 8);
unsureAsgnRef((void**) (&(*d0).t), t0);
unsureAsgnRef((void**) (&(*d0).r), r0);
}
LA1: ;
}
/* Returns true (and records info0 as the last emitted line info) when
   info0 differs from p0's previously recorded line/file; returns false
   when it is a repeat, so callers can avoid emitting duplicate #line data. */
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0) {
    if ((*p0).lastlineinfo.line == info0.line
        && (*p0).lastlineinfo.fileindex == info0.fileindex) {
        return NIM_FALSE;
    }
    (*p0).lastlineinfo.line = info0.line;
    (*p0).lastlineinfo.fileindex = info0.fileindex;
    return NIM_TRUE;
}
/* Emit source-line tracking code for AST node t0 into proc section 2:
   1. when global option bit 28 is set, emit the original source line as a
      comment-style rope (formats T..._293 + sourceLine + newline);
   2. always emit a C #line directive via genclinedir with the full path;
   3. depending on p0's option bits, additionally emit runtime line-info
      bookkeeping (format T..._294 when option mask 163840 fully set,
      T..._295 when mask 98304 fully set and fileindex >= 0), but only for
      fresh line info and when the enclosing proc lacks sym-flag bit 9. */
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI line0;
Ropeobj178006** LOC11;
NimStringDesc* LOC12;
line0 = safelinenm_532721_839829468((*t0).info);
{
Ropeobj178006** LOC5;
TY533289 LOC6;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
Ropeobj178006* LOC10;
/* Optional: echo the original source line text into the generated C. */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 28))&63U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006**)0;
LOC5 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0);
LOC8 = (Ropeobj178006*)0;
LOC8 = sourceline_192065_155036129((*t0).info);
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178418_2381377266(LOC7, LOC8);
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX26_178418_2381377266(LOC9, rnl_178903_2381377266);
add_178482_2381377266(LOC5, LOC10);
}
LA3: ;
/* Unconditional C line directive with the file's full path. */
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC12 = (NimStringDesc*)0;
LOC12 = tofullpath_192261_155036129((*t0).info.fileindex);
genclinedir_532725_839829468(LOC11, LOC12, line0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC17;
/* Mode 1: option mask 163840 fully set AND (no proc or proc lacks flag 9). */
LOC15 = (NIM_BOOL)0;
LOC15 = ((163840 & (*p0).options) == 163840);
if (!(LOC15)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*p0).prc == NIM_NIL);
if (LOC17) goto LA18;
LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA18: ;
LOC15 = LOC17;
LA16: ;
if (!LOC15) goto LA19;
{
NIM_BOOL LOC23;
TY532811 LOC26;
NimStringDesc* LOC27;
/* Only emit once per distinct line/file pair. */
LOC23 = (NIM_BOOL)0;
LOC23 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC23) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rope_178401_2381377266(((NI64) (line0)));
LOC27 = (NimStringDesc*)0;
LOC27 = tofilename_192257_155036129((*t0).info.fileindex);
LOC26[1] = makecstring_191638_155036129(LOC27);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2);
}
LA24: ;
}
goto LA13;
LA19: ;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC32;
/* Mode 2: option mask 98304 fully set, same proc-flag condition, and a
   valid (non-negative) file index. */
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((98304 & (*p0).options) == 98304);
if (!(LOC30)) goto LA31;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*p0).prc == NIM_NIL);
if (LOC32) goto LA33;
LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA33: ;
LOC30 = LOC32;
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA34;
LOC29 = (((NI32) 0) <= (*t0).info.fileindex);
LA34: ;
if (!LOC29) goto LA35;
{
NIM_BOOL LOC39;
TY532811 LOC42;
LOC39 = (NIM_BOOL)0;
LOC39 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC39) goto LA40;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rope_178401_2381377266(((NI64) (line0)));
LOC42[1] = quotedfilename_196818_155036129((*t0).info);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2);
}
LA40: ;
}
goto LA13;
LA35: ;
LA13: ;
}
/* Allocate a fresh, unique label for proc p0: bump the per-proc label
   counter and render it appended to the constant prefix rope T..._296. */
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0) {
    Ropeobj178006* counter_rope;
    p0->labels += (NI) 1;
    counter_rope = rope_178401_2381377266((NI64)(p0->labels));
    return HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), counter_rope);
}
/* Emit the definition point of label labl0 into proc section 2 using the
   one-argument format T..._299. */
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0) {
    TY178507 fmt_args;
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = labl0;
    linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_299), fmt_args, 1);
}
/* Short-circuit `and`/`or` (magic m0): evaluate operand 1 into a temp,
   then emit a conditional jump over operand 2's evaluation — format
   T..._297 when m0 == 127, T..._298 otherwise — using a fresh label.
   Operand 2 is evaluated into the same temp, the label is placed, and the
   temp is finally copied into d0 (genericAssign when d0 is unset, a
   generated assignment otherwise). splitdecls is incremented around the
   whole sequence. */
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Ropeobj178006* L0;
Tloc292816 tmp0;
L0 = (Ropeobj178006*)0;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
(*p0).splitdecls += ((NI) 1);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
L0 = getlabel_539217_839829468(p0);
{
TY532811 LOC5;
/* m0 == 127: one branch polarity; otherwise the inverted jump. */
if (!(m0 == ((Tmagic292524) 127))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&tmp0));
LOC5[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2);
}
goto LA1;
LA3: ;
{
TY532811 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&tmp0));
LOC7[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2);
}
LA1: ;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0));
fixlabel_539230_839829468(p0, L0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA8;
LA10: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA8: ;
(*p0).splitdecls -= ((NI) 1);
}
/* Unary arithmetic without overflow checking: render the format string
   unarithtab[op0 - 99] with the operand's rvalue, the skipped type's size
   in bits, and the simple type descriptor of the expression's type, then
   put the resulting rope into destination d0. */
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
    Tloc292816 operand;
    Ttype292840* skipped;
    TY535238 fmt_args;
    NI64 size_bytes;
    Ropeobj178006* rendered;
    memset((void*)(&operand), 0, sizeof(operand));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
    skipped = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = rdloc_538188_839829468((&operand));
    size_bytes = getsize_320135_3876443242(skipped);
    fmt_args[1] = rope_178401_2381377266((NI64)(size_bytes * IL64(8)));
    fmt_args[2] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
    rendered = HEX25_178905_2381377266(unarithtab_552653_839829468[(op0)- 99], fmt_args, 3);
    putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Unary arithmetic with overflow checking (magic m0): optionally emit a
   guard against the type's first ordinal value (format T..._317, only when
   p0's option bit 5 is set), then render opr[m0 - 96] with the operand's
   rvalue and the type size in bits, storing the result rope into d0. */
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
    Tloc292816 operand;
    Ttype292840* skipped;
    TY532811 fmt_args;
    NI64 size_bytes;
    Ropeobj178006* rendered;
    memset((void*)(&operand), 0, sizeof(operand));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
    skipped = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
    if ((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0)) {
        /* Overflow-check option set: guard against firstOrd(type). */
        TY532811 guard_args;
        NI64 low_bound;
        memset((void*)guard_args, 0, sizeof(guard_args));
        guard_args[0] = rdloc_538188_839829468((&operand));
        low_bound = firstord_320001_3876443242(skipped);
        guard_args[1] = intliteral_539270_839829468(low_bound);
        linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_317), guard_args, 2);
    }
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = rdloc_538188_839829468((&operand));
    size_bytes = getsize_320135_3876443242(skipped);
    fmt_args[1] = rope_178401_2381377266((NI64)(size_bytes * IL64(8)));
    rendered = HEX25_178905_2381377266(opr_551640_839829468[(m0)- 96], fmt_args, 2);
    putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Binary arithmetic without overflow checking: evaluate both operands,
   compute the larger of their sizes (in bits), and render the format
   binarithtab[op0 - 52] with both rvalues, that bit width, and the simple
   type descriptor of the expression's type; the rope goes into d0. */
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
    Tloc292816 lhs;
    Tloc292816 rhs;
    NI64 lhs_size;
    NI64 rhs_size;
    NI64 width_bits;
    TY535235 fmt_args;
    Ropeobj178006* rendered;
    memset((void*)(&lhs), 0, sizeof(lhs));
    memset((void*)(&rhs), 0, sizeof(rhs));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
    lhs_size = getsize_320135_3876443242(lhs.t);
    rhs_size = getsize_320135_3876443242(rhs.t);
    width_bits = (NI64)(((lhs_size >= rhs_size) ? lhs_size : rhs_size) * IL64(8));
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = rdloc_538188_839829468((&lhs));
    fmt_args[1] = rdloc_538188_839829468((&rhs));
    fmt_args[2] = rope_178401_2381377266(width_bits);
    fmt_args[3] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
    rendered = HEX25_178905_2381377266(binarithtab_551826_839829468[(op0)- 52], fmt_args, 4);
    putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Binary floating-point arithmetic (magic m0): when neither bit of the
   option mask 384 is set, emit the operation directly via the infix
   operator table opr[m0 - 52] (format T..._319), then optionally emit
   post-checks on the result — format T..._323 when option bit 7 is set and
   T..._324 when option bit 8 is set. When a bit of mask 384 IS set, fall
   back to the generic binaryarith path instead. */
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
{
Tloc292816 a0;
Tloc292816 b0;
TY535235 LOC5;
Tnode292802* LOC6;
Ropeobj178006* LOC7;
/* Fast path only when (options & 384) == 0. */
if (!!(((384 & (*p0).options) == 0))) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(opr_556763_839829468[(m0)- 52]);
LOC5[1] = rdloc_538188_839829468((&a0));
LOC5[2] = rdloc_538188_839829468((&b0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC5[3] = getsimpletypedesc_533936_839829468((*p0).module, (*LOC6).typ);
LOC7 = (Ropeobj178006*)0;
LOC7 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
{
TY178507 LOC12;
/* Option bit 7: emit post-check T..._323 on the destination rvalue. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 7))&31U)))!=0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&(*d0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1);
}
LA10: ;
{
TY178507 LOC17;
/* Option bit 8: emit post-check T..._324 on the destination rvalue. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 8))&31U)))!=0)) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&(*d0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_324), LOC17, 1);
}
LA15: ;
}
goto LA1;
LA3: ;
{
/* Checked mode: delegate to the generic binary arithmetic generator. */
binaryarith_551819_839829468(p0, e0, d0, m0);
}
LA1: ;
}
/* Equality comparison of two proc values: evaluate both operands, then
   select the format string by the (skipped) type's calling convention —
   T..._352 when callconv == 8, T..._341 otherwise — and render it with
   both operand rvalues into destination d0. */
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
    Tloc292816 lhs;
    Tloc292816 rhs;
    Ttype292840* skipped;
    NimStringDesc* frmt;
    TY532811 fmt_args;
    Ropeobj178006* rendered;
    memset((void*)(&lhs), 0, sizeof(lhs));
    memset((void*)(&rhs), 0, sizeof(rhs));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
    skipped = skiptypes_296099_850551059(lhs.t, IL64(211106232576256));
    if ((*skipped).callconv == ((Tcallingconvention292002) 8)) {
        frmt = (NimStringDesc*) &T839829468_352;
    } else {
        frmt = (NimStringDesc*) &T839829468_341;
    }
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = rdloc_538188_839829468((&lhs));
    fmt_args[1] = rdloc_538188_839829468((&rhs));
    rendered = HEX25_178905_2381377266(frmt, fmt_args, 2);
    putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Read a0's rvalue rope; when the loc's skipped type has kind 2, wrap the
   rvalue in the single-argument format T..._358 before returning it. */
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0) {
    Ropeobj178006* value;
    Ttype292840* skipped;
    value = rdloc_538188_839829468(a0);
    skipped = skiptypes_296099_850551059((*a0).t, IL64(211106233624832));
    if ((*skipped).kind == ((Ttypekind292244) 2)) {
        TY178507 fmt_args;
        memset((void*)fmt_args, 0, sizeof(fmt_args));
        fmt_args[0] = value;
        value = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_358), fmt_args, 1);
    }
    return value;
}
/* Core of checked binary arithmetic: allocate a fresh temp name, declare it
   (narrow types use storage T..._36, wider ones the real type descriptor),
   emit frmt0 computing into the temp from both operands' char-adjusted
   rvalues, then — for sub-int-size types or kinds in mask 1064960 — emit a
   range check (format T..._359) against firstOrd/lastOrd. Returns the rope
   naming the temp that holds the result. */
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0) {
Ropeobj178006* result0;
NI64 size0;
Ropeobj178006* storage0;
TY532811 LOC6;
TY535238 LOC7;
result0 = (Ropeobj178006*)0;
size0 = getsize_320135_3876443242(t0);
{
/* Types narrower than the platform int compute in the generic storage type. */
if (!(size0 < ((NI64) (intsize_176641_4151366050)))) goto LA3;
storage0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_36));
}
goto LA1;
LA3: ;
{
storage0 = gettypedesc_535673_839829468((*p0).module, t0);
}
LA1: ;
/* Declare the temp (section 0) and emit the computation (section 2). */
result0 = gettempname_533598_839829468((*p0).module);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = storage0;
LOC6[1] = result0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = result0;
LOC7[1] = rdcharloc_538227_839829468(a0);
LOC7[2] = rdcharloc_538227_839829468(b0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC7, 3);
{
NIM_BOOL LOC10;
TY535238 LOC14;
NI64 LOC15;
NI64 LOC16;
/* Range check when the type is narrower than int, or its kind is in the
   1064960 bit set. */
LOC10 = (NIM_BOOL)0;
LOC10 = (size0 < ((NI64) (intsize_176641_4151366050)));
if (LOC10) goto LA11;
LOC10 = ((1064960 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC15 = (NI64)0;
LOC15 = firstord_320001_3876443242(t0);
LOC14[1] = intliteral_539270_839829468(LOC15);
LOC16 = (NI64)0;
LOC16 = lastord_320004_3876443242(t0);
LOC14[2] = intliteral_539270_839829468(LOC16);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3);
}
LA12: ;
return result0;
}
/* Binary arithmetic with overflow checking (magic m0): when p0's option
   bit 5 is CLEAR, emit the unchecked inline form opr[m0 - 45]; otherwise
   pick the checked helper format — prc64 for type kind 35 (64-bit), prc
   otherwise — run it through binaryarithoverflowraw, and cast the temp back
   to the target type via format T..._370. Result goes into d0. */
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
Ropeobj178006* res0;
TY535238 LOC5;
/* Unchecked path: option bit 5 (overflow checks) not set. */
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC5[1] = rdloc_538188_839829468((&a0));
LOC5[2] = rdloc_538188_839829468((&b0));
res0 = HEX25_178905_2381377266(opr_551279_839829468[(m0)- 45], LOC5, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
Ropeobj178006* res0;
NimStringDesc* LOC7;
TY532811 LOC13;
Ropeobj178006* LOC14;
LOC7 = (NimStringDesc*)0;
{
/* Type kind 35 selects the 64-bit checked helper table. */
if (!((*t0).kind == ((Ttypekind292244) 35))) goto LA10;
LOC7 = copyString(prc64_551274_839829468[(m0)- 45]);
}
goto LA8;
LA10: ;
{
LOC7 = copyString(prc_551269_839829468[(m0)- 45]);
}
LA8: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, t0, (&a0), (&b0), LOC7);
/* Cast the raw temp back to the destination type (format T..._370). */
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC13[1] = res0;
LOC14 = (Ropeobj178006*)0;
LOC14 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc292812) 0));
}
LA1: ;
}
/* Return the rope spelling of the sequence/string length field: string
   T..._157 when the compile command is 2 or the current module carries
   sym-flag bit 27, string T..._158 otherwise. */
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0) {
    NimStringDesc* spelling;
    NIM_BOOL use_alt;
    use_alt = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
    if (!use_alt) {
        use_alt = ((((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0));
    }
    if (use_alt) {
        spelling = copyString(((NimStringDesc*) &T839829468_157));
    } else {
        spelling = copyString(((NimStringDesc*) &T839829468_158));
    }
    return rope_178277_2381377266(spelling);
}
/* When the selected GC mode is 0, report diagnostic message 263 at n0's
   source location, passing the rendered AST of n0 as its argument. */
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0) {
    if (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0)) {
        NimStringDesc* rendered = rendertree_311044_382274130(n0, 0);
        message_196095_155036129((*n0).info, ((Tmsgkind191002) 263), rendered);
    }
}
/* repr() magic: dispatch on the (skipped) kind of operand 1's type and emit
   the matching runtime repr call — a different format string (T..._371 …
   T..._383) and argument shape per kind group. Open-array-like kinds
   (27/48) first normalize the argument through an inner switch on the
   actual loc type, building a (data, length) pair before the elementwise
   repr call. Kinds 3/62 are rejected with a localError. Ends by reporting
   GC usage for the expression. */
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
switch ((*t0).kind) {
/* Integer-like kinds: repr via format T..._371 on the plain rvalue. */
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 35):
case ((Ttypekind292244) 40) ... ((Ttypekind292244) 44):
{
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468((&a0));
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, a0.s);
}
break;
/* Float-like kinds: format T..._372. */
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
TY178507 LOC5;
Ropeobj178006* LOC6;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
break;
/* Kind 1: format T..._373. */
case ((Ttypekind292244) 1):
{
TY178507 LOC8;
Ropeobj178006* LOC9;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC9 = (Ropeobj178006*)0;
LOC9 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, a0.s);
}
break;
/* Kind 2: format T..._374. */
case ((Ttypekind292244) 2):
{
TY178507 LOC11;
Ropeobj178006* LOC12;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468((&a0));
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC12, a0.s);
}
break;
/* Kinds 14/15: value plus RTTI pointer (format T..._375). */
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 15):
{
TY532811 LOC14;
Ropeobj178006* LOC15;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&a0));
LOC14[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC15 = (Ropeobj178006*)0;
LOC15 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
break;
/* Kind 28: format T..._376 on the plain rvalue. */
case ((Ttypekind292244) 28):
{
TY178507 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&a0));
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
break;
/* Kind 19: repr by address plus RTTI (format T..._377). */
case ((Ttypekind292244) 19):
{
TY532811 LOC20;
Ropeobj178006* LOC21;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = addrloc_538204_839829468((&a0));
LOC20[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC21 = (Ropeobj178006*)0;
LOC21 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC21, a0.s);
}
break;
/* Kinds 27/48: normalize the argument to a (data, length) pair first,
   depending on the loc's concrete type kind, then repr elementwise with
   the element's RTTI (format T..._382). */
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Tloc292816 b0;
TY532811 LOC34;
Ttype292840* LOC35;
Ropeobj178006* LOC36;
memset((void*)(&b0), 0, sizeof(b0));
switch ((*a0.t).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC24;
Ropeobj178006* LOC25;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = rdloc_538188_839829468((&a0));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
/* Length comes from the runtime length field. */
TY532811 LOC27;
Ropeobj178006* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rdloc_538188_839829468((&a0));
LOC27[1] = lenfield_539305_839829468(p0);
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* Length is known statically via lengthOrd of the type. */
TY532811 LOC30;
NI64 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = rdloc_538188_839829468((&a0));
LOC31 = (NI64)0;
LOC31 = lengthord_320007_3876443242(a0.t);
LOC30[1] = rope_178401_2381377266(LOC31);
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s);
}
break;
default:
{
internalerror_196100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381));
}
break;
}
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((&b0));
LOC35 = (Ttype292840*)0;
LOC35 = elemtype_320394_3876443242(t0);
LOC34[1] = gentypeinfo_535941_839829468((*p0).module, LOC35);
LOC36 = (Ropeobj178006*)0;
LOC36 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC36, a0.s);
}
break;
/* Value-with-RTTI kinds: format T..._383 on the rvalue. */
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 5):
case ((Ttypekind292244) 24):
{
TY532811 LOC38;
Ropeobj178006* LOC39;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468((&a0));
LOC38[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC39 = (Ropeobj178006*)0;
LOC39 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC39, a0.s);
}
break;
/* Kinds 3/62: repr is not supported — user-facing error. */
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
localerror_196085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384));
}
break;
/* Everything else: repr by address plus RTTI (format T..._383). */
default:
{
TY532811 LOC42;
Ropeobj178006* LOC43;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = addrloc_538204_839829468((&a0));
LOC42[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC43 = (Ropeobj178006*)0;
LOC43 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC43, a0.s);
}
break;
}
gcusage_554439_839829468(e0);
}
/* getTypeInfo magic: generate the RTTI rope for operand 1's (skipped) type
   and store it into destination d0 with default storage class. */
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
    Ttype292840* skipped;
    Ropeobj178006* type_info;
    skipped = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
    type_info = gentypeinfo_535941_839829468((*p0).module, skipped);
    putintodest_550468_839829468(p0, d0, (*e0).typ, type_info, ((Tstorageloc292812) 0));
}
/* swap(a, b) magic: allocate a temp of the operands' (skipped) type, then
   generate the three assignments tmp = a; a = b; b = tmp (all with flag 0).
   NOTE(review): d0 is accepted but never used — swap produces no value. */
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
    Tloc292816 lhs;
    Tloc292816 rhs;
    Tloc292816 scratch;
    Ttype292840* elem_type;
    memset((void*)(&lhs), 0, sizeof(lhs));
    memset((void*)(&rhs), 0, sizeof(rhs));
    memset((void*)(&scratch), 0, sizeof(scratch));
    elem_type = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
    gettemp_537032_839829468(p0, elem_type, (&scratch), NIM_FALSE);
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
    genassignment_539264_839829468(p0, (&scratch), (&lhs), 0);
    genassignment_539264_839829468(p0, (&lhs), (&rhs), 0);
    genassignment_539264_839829468(p0, (&rhs), (&scratch), 0);
}
/* Generic unary expression: evaluate operand 1, substitute its rvalue as
   the single argument of format frmt0, and put the rendered rope into d0. */
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
    Tloc292816 operand;
    TY178507 fmt_args;
    Ropeobj178006* rendered;
    memset((void*)(&operand), 0, sizeof(operand));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = rdloc_538188_839829468((&operand));
    rendered = ropecg_532407_839829468((*p0).module, frmt0, fmt_args, 1);
    putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Statement-form binary operation: d0 must be unset (kind 0), otherwise an
   internal error (message T..._387) is raised. Evaluates both operands and
   emits frmt0 with their rvalues into proc section 2. */
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
    Tloc292816 lhs;
    Tloc292816 rhs;
    TY532811 fmt_args;
    memset((void*)(&lhs), 0, sizeof(lhs));
    memset((void*)(&rhs), 0, sizeof(rhs));
    if (!((*d0).k == ((Tlockind292808) 0))) {
        internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387));
    }
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
    memset((void*)fmt_args, 0, sizeof(fmt_args));
    fmt_args[0] = rdloc_538188_839829468((&lhs));
    fmt_args[1] = rdloc_538188_839829468((&rhs));
    linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, fmt_args, 2);
}
/* String concatenation (a & b & ...): allocate a temp string, walk operands
   1..sonsLen-1 accumulating (1) a static length contribution L0 — 1 per
   char operand (type kind 2), the literal length for string-literal nodes
   (kinds 20..22) — and (2) a dynamic length rope lens0 (length-field reads,
   format T..._391) for everything else, while collecting per-operand append
   code in appends0 (char append T..._390, string append T..._392). Then
   emit the allocation call (format T..._393) with temp, dynamic lengths and
   static total, flush the appends, and move the temp into d0. */
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 tmp0;
NI L0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
L0 = ((NI) 0);
appends0 = NIM_NIL;
lens0 = NIM_NIL;
{
NI i_554475_839829468;
NI HEX3Atmp_554547_839829468;
NI LOC2;
NI res_554550_839829468;
i_554475_839829468 = (NI)0;
HEX3Atmp_554547_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554547_839829468 = (NI)(LOC2 - ((NI) 2));
res_554550_839829468 = ((NI) 0);
{
/* Loop over operands: son index i+1 for i in 0 .. sonsLen-2. */
while (1) {
if (!(res_554550_839829468 <= HEX3Atmp_554547_839829468)) goto LA4;
i_554475_839829468 = res_554550_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
/* Char operand (type kind 2): static length +1, char-append code. */
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0.r;
LOC10[1] = rdloc_538188_839829468((&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* Literal node kinds 20..22: add the literal's length statically. */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* Non-literal string: add a runtime length-field read to lens0. */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468((&a0));
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0.r;
LOC19[1] = rdloc_538188_839829468((&a0));
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554550_839829468 += ((NI) 1);
} LA4: ;
}
}
/* Emit allocation with combined dynamic + static length, then the appends. */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = tmp0.r;
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
{
/* Move the temp into the destination. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA25;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA23;
LA25: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA23: ;
gcusage_554439_839829468(e0);
}
/* In-place string append (dest.add ...): same accumulation scheme as
   genstrconcat — static length L0 for chars (kind 2) and literal nodes
   (kinds 20..22), dynamic lengths in lens0 (format T..._391), per-operand
   append code in appends0 (T..._390 for chars, T..._392 for strings) — but
   operands start at son index 2 (son 1 is the destination) and the grow
   call uses format T..._395 on the destination instead of allocating a
   temp. No value is produced; ends with the GC usage report. */
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 dest0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
NI L0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&dest0), 0, sizeof(dest0));
appends0 = (Ropeobj178006*)0;
lens0 = (Ropeobj178006*)0;
L0 = ((NI) 0);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0));
{
NI i_554615_839829468;
NI HEX3Atmp_554676_839829468;
NI LOC2;
NI res_554679_839829468;
i_554615_839829468 = (NI)0;
HEX3Atmp_554676_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554676_839829468 = (NI)(LOC2 - ((NI) 3));
res_554679_839829468 = ((NI) 0);
{
/* Loop over appended operands: son index i+2 for i in 0 .. sonsLen-3. */
while (1) {
if (!(res_554679_839829468 <= HEX3Atmp_554676_839829468)) goto LA4;
i_554615_839829468 = res_554679_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
/* Char operand (type kind 2): static length +1, char-append code. */
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&dest0));
LOC10[1] = rdloc_538188_839829468((&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* Literal node kinds 20..22: add the literal's length statically. */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* Non-literal string: add a runtime length-field read to lens0. */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468((&a0));
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&dest0));
LOC19[1] = rdloc_538188_839829468((&a0));
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554679_839829468 += ((NI) 1);
} LA4: ;
}
}
/* Grow the destination by the combined length, then flush the appends. */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468((&dest0));
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
gcusage_554439_839829468(e0);
}
/* Machine-generated by the Nim compiler's C backend (apparently from the
 * compiler's ccgexprs module, proc `genSeqElemAppend`-like logic). Emits C
 * code that appends a single element `e[2]` to the seq denoted by `e[1]`,
 * choosing one of two growth patterns (T..._396 / T..._397) depending on the
 * active GC command / module flags — presumably the Boehm-vs-native-GC split;
 * verify against the Nim compiler sources. Do not hand-edit: regenerate from
 * the Nim source instead. */
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* seqappendpattern0;
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 dest0;
Ttype292840* bt0;
TY535238 LOC8;
Ttype292840* LOC9;
TY532811 LOC10;
TY532811 LOC11;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* pattern selection: gcmd == 2 OR bit 27 set in the module's flags */
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396));
}
goto LA1;
LA5: ;
{
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397));
}
LA1: ;
/* zero the three Tloc temporaries before initlocexpr fills them */
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
bt0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC8[1] = gettypedesc_535673_839829468((*p0).module, LOC9);
LOC8[2] = gettypedesc_535673_839829468((*p0).module, bt0);
/* emit the grow/append statement into the proc's statement section (2) */
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), seqappendpattern0, LOC8, 3);
initloc_532273_839829468((&dest0), ((Tlockind292808) 6), bt0, ((Tstorageloc292812) 3));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&a0));
LOC10[1] = lenfield_539305_839829468(p0);
/* dest = seq.data[seq.len] (format T..._398), then assign b into it */
dest0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2);
genassignment_539264_839829468(p0, (&dest0), (&b0), 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468((&a0));
LOC11[1] = lenfield_539305_839829468(p0);
/* bump the length field (format T..._399) */
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated helper (ccgexprs `binaryExpr`): evaluates the two
 * operand sons e[1], e[2] into locs, substitutes their rvalues into the
 * caller-supplied format string frmt0, and stores the resulting expression
 * rope into destination d0. Generated code — do not hand-edit. */
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC1[1] = rdloc_538188_839829468((&b0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Nim-compiler-generated (ccgexprs `genStrEquals`): code generation for
 * string equality `a == b`. Special cases visible in the branches:
 *  - either operand is node kind 23 (presumably nkNilLit): use format _341;
 *  - either operand is a string literal (kinds 20..22) with length 0:
 *    compile to a cheap length check (format _400) on the other operand;
 *  - otherwise: full string comparison via format _401.
 * Generated code — do not hand-edit. */
N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 x0;
Tnode292802* a0;
Tnode292802* b0;
memset((void*)(&x0), 0, sizeof(x0));
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
b0 = (*e0).kindU.S6.sons->data[((NI) 2)];
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*a0).kind == ((Tnodekind292020) 23));
if (LOC3) goto LA4;
LOC3 = ((*b0).kind == ((Tnodekind292020) 23));
LA4: ;
if (!LOC3) goto LA5;
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
TY532811 LOC12;
Ropeobj178006* LOC13;
LOC8 = (NIM_BOOL)0;
/* a is a literal node (kinds 20..22) whose string payload is empty */
LOC8 = ((*a0).kind >= ((Tnodekind292020) 20) && (*a0).kind <= ((Tnodekind292020) 22));
if (!(LOC8)) goto LA9;
LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0);
LA9: ;
if (!LOC8) goto LA10;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0));
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&x0));
LOC12[1] = lenfield_539305_839829468(p0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc292812) 0));
}
goto LA1;
LA10: ;
{
NIM_BOOL LOC15;
TY532811 LOC19;
Ropeobj178006* LOC20;
LOC15 = (NIM_BOOL)0;
/* symmetric case: b is the empty string literal */
LOC15 = ((*b0).kind >= ((Tnodekind292020) 20) && (*b0).kind <= ((Tnodekind292020) 22));
if (!(LOC15)) goto LA16;
LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0);
LA16: ;
if (!LOC15) goto LA17;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&x0));
LOC19[1] = lenfield_539305_839829468(p0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc292812) 0));
}
goto LA1;
LA17: ;
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401));
}
LA1: ;
}
/* Nim-compiler-generated (ccgexprs `genIsNil`): emits `isNil(x)`. Closures
 * (type kind 25 with calling convention 8 — presumably tyProc + ccClosure)
 * need a different nil test (format _404, likely testing the proc field of
 * the closure pair) than plain pointers (format _405). Generated code. */
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC3)) goto LA4;
LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA4: ;
if (!LOC3) goto LA5;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404));
}
goto LA1;
LA5: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405));
}
LA1: ;
}
/* Nim-compiler-generated (ccgexprs `genDollar`): code generation for the `$`
 * stringify operator. Evaluates the operand, wraps it in the conversion
 * format frmt0, allocates a temporary destination if the caller passed an
 * empty loc (kind 0), then assigns the rope result into it. Generated code. */
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
/* a.r is overwritten in place with the `$`-converted expression */
a0.r = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA4;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA4: ;
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0);
gcusage_554439_839829468(n0);
}
/* Nim-compiler-generated (ccgexprs `genOfHelper`): builds the rope for a
 * runtime `of` (dynamic type) check of expression `a0` against type `dest0`.
 * Two strategies are visible:
 *  - direct check (format _414) with the type-info pointer when the type has
 *    flag bit 2 set, or a codegen flag demands it;
 *  - otherwise, a cached check: a fresh module-level cache variable is
 *    declared (formats _415/_416) and format _417 performs/memoizes the
 *    subtype test through it — presumably the "isObjWithCache" fast path.
 * Generated code — do not hand-edit. */
N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0) {
Ropeobj178006* result0;
Ropeobj178006* ti0;
result0 = (Ropeobj178006*)0;
ti0 = gentypeinfo_535941_839829468((*p0).module, dest0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC9;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag529025) 5))&7U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = a0;
LOC9[1] = ti0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2);
}
goto LA1;
LA7: ;
{
Ropeobj178006* LOC11;
Ropeobj178006* cache0;
Ropeobj178006* LOC12;
TY178507 LOC13;
TY535238 LOC14;
LOC11 = (Ropeobj178006*)0;
/* pull in the runtime symbol named by T..._129 (likely the cached-of proc) */
LOC11 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129));
(*(*p0).module).labels += ((NI) 1);
LOC12 = (Ropeobj178006*)0;
LOC12 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
cache0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_415), LOC12);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = cache0;
/* declare the cache variable in file section 9 */
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = a0;
LOC14[1] = ti0;
LOC14[2] = cache0;
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3);
}
LA1: ;
return result0;
}
/* Nim-compiler-generated (ccgexprs `genOf`): full code generation for
 * `x of typ`. Walks the pointer/ref/var layers of x's type, remembering the
 * last rvalue that needs a nil check, dereferencing as it goes; optionally
 * drills through inherited object headers (the `.Sup` chain, format _153)
 * when full runtime checks are enabled; errors out if the object carries no
 * type field; finally wraps the genofhelper result with or without the nil
 * guard (formats _413 / _418) and stores a bool-typed result into d0.
 * Generated code — do not hand-edit. */
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
Ttype292840* LOC41;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, x0, (&a0));
dest0 = skiptypes_296099_850551059(typ0, IL64(211106247256320));
r0 = rdloc_538188_839829468((&a0));
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
/* peel pointer-like type kinds (bitmask 14680064 over t->kind) */
while (1) {
Ttype292840* LOC16;
if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA2;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA5;
nilcheck0 = r0;
}
LA5: ;
{
NIM_BOOL LOC9;
NIM_BOOL LOC11;
TY178507 LOC15;
LOC9 = (NIM_BOOL)0;
LOC9 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC9) goto LA10;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
LOC9 = !(LOC11);
LA10: ;
if (!LOC9) goto LA13;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = r0;
/* dereference one level (format _124) */
r0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1);
}
LA13: ;
LOC16 = (Ttype292840*)0;
LOC16 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC16, IL64(211106232576256));
} LA2: ;
}
{
NIM_BOOL LOC19;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
if (!!(LOC19)) goto LA21;
{
/* climb the inheritance chain: object (kind 17) with a non-nil base */
while (1) {
NIM_BOOL LOC25;
TY533289 LOC27;
Ropeobj178006* LOC28;
LOC25 = (NIM_BOOL)0;
LOC25 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC25)) goto LA26;
LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA26: ;
if (!LOC25) goto LA24;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0);
add_178482_2381377266(&r0, LOC28);
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA24: ;
}
}
LA21: ;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = isobjlackingtypefield_533515_839829468(t0);
if (!LOC31) goto LA32;
/* `of` is impossible without an embedded type field -> compile error _412 */
globalerror_196071_155036129((*x0).info, ((Tmsgkind191002) 4), ((NimStringDesc*) &T839829468_412));
}
LA32: ;
{
TY532811 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = genofhelper_555140_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2);
}
goto LA34;
LA36: ;
{
TY178507 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = genofhelper_555140_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1);
}
LA34: ;
LOC41 = (Ttype292840*)0;
/* result type is system type kind 1 — presumably `bool` */
LOC41 = getsystype_338150_3937434831(((Ttypekind292244) 1));
putintodest_550468_839829468(p0, d0, LOC41, r0, a0.s);
}
/* Thin wrapper: dispatches an `of` call node n (n[1] of n[2].typ) to the
 * worker above. Nim-compiler-generated. */
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
genof_555201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0);
}
/* Nim-compiler-generated (ccgexprs `rawGenNew`): emits the allocation for
 * `new(a)`. If no explicit size expression was supplied, a sizeof-based one
 * is synthesized (format _419). With native GC and an on-heap destination it
 * emits an unref of the old value (cycle-aware _420 vs plain _255), allocates
 * via format _421 and stores with _123; otherwise it allocates via _422 and
 * uses a normal assignment. Finishes by emitting the object's type-field
 * initialization. Generated code — do not hand-edit. */
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468) {
Ropeobj178006* sizeexpr0;
Ttype292840* reftype0;
Tloc292816 b0;
TY535238 args0;
Ttype292840* bt0;
sizeexpr0 = sizeexpr_554745_839829468;
reftype0 = skiptypes_296099_850551059((*a0).t, IL64(211106242013440));
memset((void*)(&b0), 0, sizeof(b0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*a0).t, ((Tstorageloc292812) 3));
{
TY178507 LOC5;
Ttype292840* LOC6;
/* NOTE(review): `!sizeexpr0 == 0` parses as `(!sizeexpr0) == 0`, i.e. the
 * goto skips this block when sizeexpr0 is non-nil — Nim codegen idiom for
 * `if sizeExpr.isNil`. Counter-intuitive but intentional. */
if (!sizeexpr0 == 0) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
LOC5[0] = gettypedesc_535673_839829468((*p0).module, LOC6);
sizeexpr0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1);
}
LA3: ;
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535673_839829468((*p0).module, reftype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, reftype0);
args0[2] = sizeexpr0;
{
NIM_BOOL LOC9;
TY532811 LOC21;
LOC9 = (NIM_BOOL)0;
LOC9 = ((*a0).s == ((Tstorageloc292812) 3));
if (!(LOC9)) goto LA10;
LOC9 = usesnativegc_169177_2607990831();
LA10: ;
if (!LOC9) goto LA11;
{
NIM_BOOL LOC15;
TY178507 LOC18;
LOC15 = (NIM_BOOL)0;
LOC15 = canformacycle_320123_3876443242((*a0).t);
if (!LOC15) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1);
}
goto LA13;
LA16: ;
{
TY178507 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1);
}
LA13: ;
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3);
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468(a0);
LOC21[1] = rdloc_538188_839829468((&b0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2);
}
goto LA7;
LA11: ;
{
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3);
genassignment_539264_839829468(p0, a0, (&b0), 0);
}
LA7: ;
bt0 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
}
/* Nim-compiler-generated (ccgexprs `genNew`): handles the `new` magic. A
 * 3-son call carries an explicit size expression (e[2]); otherwise the raw
 * allocator is invoked with a nil size and computes sizeof itself.
 * Generated code — do not hand-edit. */
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI LOC3;
Tloc292816 se0;
Ropeobj178006* LOC6;
LOC3 = (NI)0;
LOC3 = len_293081_850551059(e0);
if (!(LOC3 == ((NI) 3))) goto LA4;
memset((void*)(&se0), 0, sizeof(se0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0));
LOC6 = (Ropeobj178006*)0;
LOC6 = rdloc_538188_839829468((&se0));
rawgennew_554741_839829468(p0, (&a0), LOC6);
}
goto LA1;
LA4: ;
{
rawgennew_554741_839829468(p0, (&a0), NIM_NIL);
}
LA1: ;
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated (ccgexprs `genNewFinalize`): `new(a, finalizer)`.
 * Registers the finalizer f against the ref type's type-info in file
 * section 14 (format _423), allocates via format _424, assigns into a, and
 * emits the object-header initialization. Generated code — do not hand-edit. */
N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 f0;
Ttype292840* reftype0;
Ttype292840* bt0;
Ropeobj178006* ti0;
TY532811 LOC1;
TY535238 LOC2;
Ttype292840* LOC3;
Ttype292840* LOC4;
Ttype292840* LOC5;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&f0), 0, sizeof(f0));
reftype0 = (Ttype292840*)0;
bt0 = (Ttype292840*)0;
ti0 = (Ropeobj178006*)0;
reftype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
ti0 = gentypeinfo_535941_839829468((*p0).module, reftype0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = ti0;
LOC1[1] = rdloc_538188_839829468((&f0));
/* type-info finalizer hookup goes into module file section 14 */
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535673_839829468((*p0).module, reftype0);
LOC2[1] = ti0;
LOC3 = (Ttype292840*)0;
LOC3 = lastson_295377_850551059(reftype0);
LOC4 = (Ttype292840*)0;
LOC4 = skiptypes_296099_850551059(LOC3, IL64(211106233624832));
LOC2[2] = gettypedesc_535673_839829468((*p0).module, LOC4);
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3);
genassignment_539264_839829468(p0, (&a0), (&b0), 0);
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(reftype0);
bt0 = skiptypes_296099_850551059(LOC5, IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, (&a0), NIM_FALSE);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated (ccgexprs `genNewSeqAux`): allocates a seq of the
 * given length into *dest0. Mirrors rawgennew's structure: with native GC and
 * an on-heap destination, unref the old value (cycle-aware _420 vs _255),
 * allocate via _425 and store with _123; otherwise allocate via _426 and use
 * a plain assignment. Generated code — do not hand-edit. */
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0) {
Ttype292840* seqtype0;
TY535238 args0;
Tloc292816 call0;
seqtype0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535673_839829468((*p0).module, seqtype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
args0[2] = length0;
memset((void*)(&call0), 0, sizeof(call0));
initloc_532273_839829468((&call0), ((Tlockind292808) 6), (*dest0).t, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC3;
TY532811 LOC15;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*dest0).s == ((Tstorageloc292812) 3));
if (!(LOC3)) goto LA4;
LOC3 = usesnativegc_169177_2607990831();
LA4: ;
if (!LOC3) goto LA5;
{
NIM_BOOL LOC9;
TY178507 LOC12;
LOC9 = (NIM_BOOL)0;
LOC9 = canformacycle_320123_3876443242((*dest0).t);
if (!LOC9) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1);
}
goto LA7;
LA10: ;
{
TY178507 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1);
}
LA7: ;
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rdloc_538188_839829468(dest0);
LOC15[1] = rdloc_538188_839829468((&call0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2);
}
goto LA1;
LA5: ;
{
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3);
genassignment_539264_839829468(p0, dest0, (&call0), 0);
}
LA1: ;
}
/* Nim-compiler-generated (ccgexprs `genNewSeq`): `newSeq(a, len)` — evaluates
 * the destination and the length, then delegates to gennewseqaux. */
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Ropeobj178006* LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
LOC1 = (Ropeobj178006*)0;
LOC1 = rdloc_538188_839829468((&b0));
gennewseqaux_554795_839829468(p0, (&a0), LOC1);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated (ccgexprs `genNewSeqOfCap`): `newSeqOfCap(cap)` —
 * builds a zero-length seq with the requested capacity via format _427 and
 * stores the resulting expression into d0. Generated code. */
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* seqtype0;
Tloc292816 a0;
TY535238 LOC1;
Ropeobj178006* LOC2;
seqtype0 = skiptypes_296099_850551059((*e0).typ, IL64(211106242013440));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = gettypedesc_535673_839829468((*p0).module, seqtype0);
LOC1[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
LOC1[2] = rdloc_538188_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated (ccgtypes `getClosureType`): emits a C typedef for
 * a proc type t0 and returns its fresh temp name. Non-closure kinds (or the
 * "bound" closure kind 2) get a plain function-pointer typedef (format _64,
 * with the calling-convention prefix); otherwise the closure-pair typedef
 * (format _75). Imported types get no typedef at all. Generated code. */
N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0) {
Ropeobj178006* result0;
Intset268030 check0;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = (Ropeobj178006*)0;
/* double memset with a chckNil between: Nim's zero-init + nil-check
 * prologue for a local object — redundant-looking but emitted as such */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
result0 = gettempname_533598_839829468(m0);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind535681) 0))), NIM_FALSE);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedtype_533451_839829468(t0);
if (!!(LOC3)) goto LA4;
{
NIM_BOOL LOC8;
TY535235 LOC12;
LOC8 = (NIM_BOOL)0;
LOC8 = !(((*t0).callconv == ((Tcallingconvention292002) 8)));
if (LOC8) goto LA9;
LOC8 = !((kind0 == ((Tclosuretypekind535681) 2)));
LA9: ;
if (!LOC8) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t0).callconv)- 0]);
LOC12[1] = rettype0;
LOC12[2] = result0;
LOC12[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4);
}
goto LA6;
LA10: ;
{
TY535238 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC14[1] = rettype0;
LOC14[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3);
}
LA6: ;
}
LA4: ;
return result0;
}
/* Nim-compiler-generated (ccgexprs `genSomeCast`): code generation for a
 * `cast` expression. Three paths:
 *  - target kind in mask 281475111387152 and the source loc is addressable:
 *    reinterpret through the address (format _429) — the type-punning cast;
 *  - target is a closure proc type (kind 25, callconv 8): cast through the
 *    closure typedef from getclosuretype (format _430);
 *  - otherwise: plain C value cast via the target's type descriptor (_430).
 * Generated code — do not hand-edit. */
N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* etyp0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
etyp0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC7;
Ropeobj178006* LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((IL64(281475111387152) &((NU64)1<<((NU)((*etyp0).kind)&63U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ);
LOC7[1] = addrloc_538204_839829468((&a0));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC8, a0.s);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC10;
TY532811 LOC14;
Ropeobj178006* LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*etyp0).kind == ((Ttypekind292244) 25));
if (!(LOC10)) goto LA11;
LOC10 = ((*etyp0).callconv == ((Tcallingconvention292002) 8));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = getclosuretype_535685_839829468((*p0).module, etyp0, ((Tclosuretypekind535681) 1));
LOC14[1] = rdcharloc_538227_839829468((&a0));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
goto LA1;
LA12: ;
{
TY532811 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ);
LOC17[1] = rdcharloc_538227_839829468((&a0));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
LA1: ;
}
/* Nim-compiler-generated (ccgexprs `unaryExprChar`): like unaryexpr, but the
 * operand is read via rdcharloc (presumably adds a char/ordinal cast) before
 * substitution into frmt0. Generated code. */
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdcharloc_538227_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Nim-compiler-generated (`genOrd`): `ord(x)` is just the char-read unary
 * expression with the conversion format T..._301. */
N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301));
}
/* Nim-compiler-generated (ccgexprs `genArrayLen`): generates `len`/`high`
 * (op 8 appears to be the `high` magic) for the container kinds below.
 * Skips an implicit-deref node (kind 64) first, then dispatches on the
 * skipped type kind:
 *  - 27/48: openArray/varargs-like — formats _431/_432;
 *  - 29: cstring — strlen-based (_433/_434), hence usestringh;
 *  - 28/24: string/seq — nil-safe (_435/_436) or unchecked (_437/_438)
 *    depending on GC command / module flags;
 *  - 16/4: fixed-size array/range — the answer is a compile-time constant
 *    (lastord / lengthord);
 *  - anything else is an internal error (_439).
 * Generated code — do not hand-edit. */
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tnode292802* a0;
Ttype292840* typ0;
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
{
if (!((*a0).kind == ((Tnodekind292020) 64))) goto LA3;
a0 = (*a0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
typ0 = skiptypes_296099_850551059((*a0).typ, IL64(211106240964864));
switch ((*typ0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA8;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431));
}
goto LA6;
LA8: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432));
}
LA6: ;
}
break;
case ((Ttypekind292244) 29):
{
/* cstring length needs <string.h> in the emitted module */
usestringh_532345_839829468((*p0).module);
{
if (!(op0 == ((Tmagic292524) 8))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434));
}
LA12: ;
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
if (!!(LOC20)) goto LA22;
{
if (!(op0 == ((Tmagic292524) 8))) goto LA26;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435));
}
goto LA24;
LA26: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436));
}
LA24: ;
}
goto LA18;
LA22: ;
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA32;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_437));
}
goto LA30;
LA32: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438));
}
LA30: ;
}
LA18: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
{
NI64 LOC40;
Ropeobj178006* LOC41;
if (!(op0 == ((Tmagic292524) 8))) goto LA38;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(typ0);
LOC41 = (Ropeobj178006*)0;
LOC41 = rope_178401_2381377266(LOC40);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc292812) 0));
}
goto LA36;
LA38: ;
{
NI64 LOC43;
Ropeobj178006* LOC44;
LOC43 = (NI64)0;
LOC43 = lengthord_320007_3876443242(typ0);
LOC44 = (Ropeobj178006*)0;
LOC44 = rope_178401_2381377266(LOC43);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc292812) 0));
}
LA36: ;
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439));
}
break;
}
}
/* Nim-compiler-generated (ccgexprs `unaryStmt`): emits a one-operand
 * statement (no value). A non-empty destination loc is an internal error
 * (_442) — statements must not be asked to produce a result. */
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC5;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442));
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 1);
}
/* Nim-compiler-generated (`genSetLengthStr`): `setLen` on a string — a
 * binary statement with format _445, plus the GC usage note. */
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445));
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated (ccgexprs `genSetLengthSeq`): `setLen` on a seq.
 * Chooses pattern _446 or _447 on the same GC-command / module-flag test used
 * elsewhere, then emits it with the seq, the new length, and the seq/element
 * type descriptors. Generated code — do not hand-edit. */
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
NimStringDesc* setlenpattern0;
TY535235 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446));
}
goto LA1;
LA5: ;
{
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447));
}
LA1: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC8[1] = rdloc_538188_839829468((&b0));
LOC8[2] = gettypedesc_535673_839829468((*p0).module, t0);
LOC8[3] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), setlenpattern0, LOC8, 4);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated (ccgexprs `rdSetElemLoc`): reads a set-element loc,
 * rebasing the value by the set's first ordinal (format _448) when the set
 * type does not start at 0 — the bit index must be zero-based. Returns the
 * rope of the adjusted expression. */
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = rdcharloc_538227_839829468(a0);
{
NI64 LOC3;
TY532811 LOC6;
NI64 LOC7;
LOC3 = (NI64)0;
LOC3 = firstord_320001_3876443242(settype0);
if (!!((LOC3 == IL64(0)))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
LOC7 = (NI64)0;
LOC7 = firstord_320001_3876443242(settype0);
LOC6[1] = rope_178401_2381377266(LOC7);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2);
}
LA4: ;
return result0;
}
/* Nim-compiler-generated (`binaryStmtInExcl`): shared emitter for the set
 * `incl`/`excl` statements — operand b is rebased through rdsetelemloc
 * against a's set type before substitution into frmt0. */
N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC1[1] = rdsetelemloc_555662_839829468((&b0), a0.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC1, 2);
}
/* Nim-compiler-generated (`binaryExprChar`): two-operand expression where
 * both operands are read via rdcharloc (char/ordinal read) before being
 * substituted into frmt0; result goes into d0. */
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdcharloc_538227_839829468((&a0));
LOC1[1] = rdcharloc_538227_839829468((&b0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Nim-compiler-generated (ccgexprs `fewCmps`): decides whether an `in` test
 * against a set constructor s (node kind 39, asserted) should compile to a
 * short chain of comparisons rather than a real set operation. Returns:
 *  - false when the set fits in a machine word and is a constant (flag 4) —
 *    the bit-test form wins;
 *  - true when the element type is in mask 62277025792 (presumably large
 *    ordinals where a bitset is infeasible);
 *  - otherwise true iff the constructor has at most 8 elements. */
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
if (!!(((*s0).kind == ((Tnodekind292020) 39)))) goto LA3;
internalerror_196100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463));
}
LA3: ;
{
NIM_BOOL LOC7;
NI64 LOC8;
LOC7 = (NIM_BOOL)0;
LOC8 = (NI64)0;
LOC8 = getsize_320135_3876443242((*s0).typ);
LOC7 = (LOC8 <= ((NI64) (intsize_176641_4151366050)));
if (!(LOC7)) goto LA9;
LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0);
LA9: ;
if (!LOC7) goto LA10;
result0 = NIM_FALSE;
}
goto LA5;
LA10: ;
{
Ttype292840* LOC13;
LOC13 = (Ttype292840*)0;
LOC13 = elemtype_320394_3876443242((*s0).typ);
if (!((IL64(62277025792) &((NU64)1<<((NU)((*LOC13).kind)&63U)))!=0)) goto LA14;
result0 = NIM_TRUE;
}
goto LA5;
LA14: ;
{
NI LOC17;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(s0);
result0 = (LOC17 <= ((NI) 8));
}
LA5: ;
return result0;
}
/* Nim-compiler-generated (`binaryExprIn`): emits the set-membership bit test
 * from pre-evaluated locs a (the set) and b (the element, rebased via
 * rdsetelemloc) using the size-specific format frmt0; result into d0. */
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0) {
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&(*a0)));
LOC1[1] = rdsetelemloc_555662_839829468((&(*b0)), (*a0).t);
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Nim-compiler-generated (ccgexprs `genInExprAux`): picks the `in` bit-test
 * format by the byte size of the set type — 1/2/4/8-byte word sets get the
 * direct mask formats _467.._470, anything larger the array-of-bits form
 * _471 — and forwards to binaryexprin. Generated code. */
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0) {
Ttype292840* LOC1;
NI64 LOC2;
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(LOC1);
switch (((NI) (LOC2))) {
case ((NI) 1):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467));
}
break;
case ((NI) 2):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468));
}
break;
case ((NI) 4):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469));
}
break;
case ((NI) 8):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470));
}
break;
default:
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471));
}
break;
}
}
/* Generate code for the set-membership (`in`) operator.  When the set
   operand (son 1) is a constant literal deemed small by fewcmps, emit an
   explicit chain of comparisons/range checks built into rope b0;
   otherwise evaluate both operands and fall back to geninexpraux, which
   emits the bit-mask form.  Result is stored into destination d0. */
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 x0;
Tloc292816 y0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
{
NIM_BOOL LOC3;
Tnode292802* ea0;
NI length0;
/* Short-circuit: son 1 is a set literal (kind 39) AND fewcmps says a
   comparison chain is preferable. */
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 39));
if (!(LOC3)) goto LA4;
LOC3 = fewcmps_555803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]);
LA4: ;
if (!LOC3) goto LA5;
{
/* If the tested element (son 2) is wrapped in a conversion node
   (kind 69/70), unwrap it so comparisons use the raw operand. */
if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 69))) goto LA9;
ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)];
}
goto LA7;
LA9: ;
{
ea0 = (*e0).kindU.S6.sons->data[((NI) 2)];
}
LA7: ;
initlocexpr_539283_839829468(p0, ea0, (&a0));
/* b0 accumulates the generated boolean expression as a rope. */
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*e0).typ, ((Tstorageloc292812) 0));
b0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_118));
length0 = sonslen_295351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]);
{
NI i_556061_839829468;
NI HEX3Atmp_556412_839829468;
NI res_556415_839829468;
i_556061_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)(length0 - ((NI) 1));
res_556415_839829468 = ((NI) 0);
{
/* One term per set-literal son, joined by the separator rope. */
while (1) {
if (!(res_556415_839829468 <= HEX3Atmp_556412_839829468)) goto LA14;
i_556061_839829468 = res_556415_839829468;
{
TY535238 LOC19;
/* Range element (kind 44): emit a two-sided comparison a in [x, y]. */
if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kind == ((Tnodekind292020) 44))) goto LA17;
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdcharloc_538227_839829468((&a0));
LOC19[1] = rdcharloc_538227_839829468((&x0));
LOC19[2] = rdcharloc_538227_839829468((&y0));
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3);
}
goto LA15;
LA17: ;
{
TY532811 LOC21;
/* Single element: emit an equality comparison. */
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468], (&x0));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdcharloc_538227_839829468((&a0));
LOC21[1] = rdcharloc_538227_839829468((&x0));
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2);
}
LA15: ;
{
/* Separator between terms (not after the last one). */
if (!(i_556061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24;
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_466));
}
LA24: ;
res_556415_839829468 += ((NI) 1);
} LA14: ;
}
}
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117));
putintodest_550468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc292812) 0));
}
goto LA1;
LA5: ;
{
/* General case: evaluate both operands and use the size-dispatched
   bit-mask membership test. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
geninexpraux_553496_839829468(p0, e0, (&a0), (&b0), d0);
}
LA1: ;
}
/* Generate code for a set operation identified by the magic op0.  The
   outer switch splits on the set's byte size: word-sized sets (1/2/4/8
   bytes) get inline bit arithmetic; larger sets fall to the default case,
   which emits loops / memcmp / memset-style code over the set's bytes.
   The specific Tmagic values (incl/excl/card, comparisons, union,
   difference, intersection, symmetric difference, `in`) select the
   format string or helper used.  Magic numbers here mirror Nim enum
   ordinals — NOTE(review): exact meanings taken from branch structure,
   not independently confirmed. */
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 i0;
Ttype292840* settype0;
NI size0;
NI64 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&i0), 0, sizeof(i0));
settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(settype0);
size0 = ((NI) (LOC1));
switch (size0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
/* Set fits in a single machine integer: inline bit operations. */
switch (op0) {
case ((Tmagic292524) 39):
{
/* incl: build the "NU<bits>" type-size string, splice it into the
   inclusion format, and emit as a statement. */
NimStringDesc* ts0;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
NimStringDesc* LOC6;
LOC4 = (NimStringDesc*)0;
LOC5 = (NimStringDesc*)0;
LOC5 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC4 = rawNewString(LOC5->Sup.len + 2);
appendString(LOC4, ((NimStringDesc*) &T839829468_45));
appendString(LOC4, LOC5);
ts0 = LOC4;
LOC6 = (NimStringDesc*)0;
LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35);
appendString(LOC6, ((NimStringDesc*) &T839829468_449));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_450));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_451));
binarystmtinexcl_555858_839829468(p0, e0, d0, LOC6);
}
break;
case ((Tmagic292524) 40):
{
/* excl: same construction with the exclusion format pieces. */
NimStringDesc* ts0;
NimStringDesc* LOC8;
NimStringDesc* LOC9;
NimStringDesc* LOC10;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC8 = rawNewString(LOC9->Sup.len + 2);
appendString(LOC8, ((NimStringDesc*) &T839829468_45));
appendString(LOC8, LOC9);
ts0 = LOC8;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42);
appendString(LOC10, ((NimStringDesc*) &T839829468_452));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_453));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_454));
binarystmtinexcl_555858_839829468(p0, e0, d0, LOC10);
}
break;
case ((Tmagic292524) 41):
{
/* card: popcount helper; 32-bit vs 64-bit variant by size. */
{
if (!(size0 <= ((NI) 4))) goto LA14;
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455));
}
goto LA12;
LA14: ;
{
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456));
}
LA12: ;
}
break;
case ((Tmagic292524) 133):
{
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457));
}
break;
case ((Tmagic292524) 132):
{
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458));
}
break;
case ((Tmagic292524) 131):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
break;
case ((Tmagic292524) 134):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459));
}
break;
case ((Tmagic292524) 135):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460));
}
break;
case ((Tmagic292524) 136):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461));
}
break;
case ((Tmagic292524) 137):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462));
}
break;
case ((Tmagic292524) 148):
{
/* `in` operator: delegated to geninop. */
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472));
}
break;
}
}
break;
default:
{
/* Set larger than a machine word: operate on the byte array form. */
switch (op0) {
case ((Tmagic292524) 39):
{
binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473));
}
break;
case ((Tmagic292524) 40):
{
binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_474));
}
break;
case ((Tmagic292524) 41):
{
/* card over a byte array: splice the byte count into the helper call. */
NimStringDesc* LOC30;
NimStringDesc* LOC31;
LOC30 = (NimStringDesc*)0;
LOC31 = (NimStringDesc*)0;
LOC31 = nimIntToStr(size0);
LOC30 = rawNewString(LOC31->Sup.len + 14);
appendString(LOC30, ((NimStringDesc*) &T839829468_475));
appendString(LOC30, LOC31);
appendChar(LOC30, 41);
unaryexprchar_551222_839829468(p0, e0, d0, LOC30);
}
break;
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
{
/* Set comparisons: need a loop counter temp (i0) and a boolean
   destination; emitted via a per-op format from lookupopr. */
Ttype292840* LOC33;
TY536475 LOC39;
LOC33 = (Ttype292840*)0;
LOC33 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC33, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC38;
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
LOC38 = (Ttype292840*)0;
LOC38 = getsystype_338150_3937434831(((Ttypekind292244) 1));
gettemp_537032_839829468(p0, LOC38, d0, NIM_FALSE);
}
LA36: ;
memset((void*)LOC39, 0, sizeof(LOC39));
LOC39[0] = rdloc_538188_839829468((&i0));
LOC39[1] = rope_178401_2381377266(((NI64) (size0)));
LOC39[2] = rdloc_538188_839829468((&(*d0)));
LOC39[3] = rdloc_538188_839829468((&a0));
LOC39[4] = rdloc_538188_839829468((&b0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), lookupopr_556426_839829468[(op0)- 132], LOC39, 5);
}
break;
case ((Tmagic292524) 131):
{
/* Equality: memcmp over size0 bytes (requires <string.h>). */
NimStringDesc* LOC41;
NimStringDesc* LOC42;
usestringh_532345_839829468((*p0).module);
LOC41 = (NimStringDesc*)0;
LOC42 = (NimStringDesc*)0;
LOC42 = nimIntToStr(size0);
LOC41 = rawNewString(LOC42->Sup.len + 21);
appendString(LOC41, ((NimStringDesc*) &T839829468_481));
appendString(LOC41, LOC42);
appendString(LOC41, ((NimStringDesc*) &T839829468_482));
binaryexprchar_550809_839829468(p0, e0, d0, LOC41);
}
break;
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 137):
{
/* Binary set ops (union/diff/intersect/symdiff): loop over bytes,
   the per-op C operator comes from lookupopr. */
Ttype292840* LOC44;
TY536847 LOC49;
LOC44 = (Ttype292840*)0;
LOC44 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC44, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA47;
gettemp_537032_839829468(p0, a0.t, d0, NIM_FALSE);
}
LA47: ;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&i0));
LOC49[1] = rope_178401_2381377266(((NI64) (size0)));
LOC49[2] = rdloc_538188_839829468((&(*d0)));
LOC49[3] = rdloc_538188_839829468((&a0));
LOC49[4] = rdloc_538188_839829468((&b0));
LOC49[5] = rope_178277_2381377266(lookupopr_556426_839829468[(op0)- 132]);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6);
}
break;
case ((Tmagic292524) 148):
{
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484));
}
break;
}
}
break;
}
}
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Render a string-to-cstring conversion argument: evaluate the node's
   first son into a loc and wrap its rendering with the conversion
   format string T839829468_485. */
Tloc292816 strLoc;
TY178507 fmtArgs;
memset((void*)(&strLoc), 0, sizeof(strLoc));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&strLoc));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468((&strLoc));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), fmtArgs, 1);
}
/* Render an argument being passed to an openArray parameter as the pair
   "data pointer, length".  Two top-level cases: (1) the argument is a
   slice call (magic 139) — evaluate array, lower and upper bound, then
   pick a format string by the sliced type's kind; (2) anything else —
   evaluate the expression and derive the length from its (skipped) type:
   cstring kinds, seq/string kinds (with a GC-mode-dependent layout
   check), fixed arrays (length from lengthord), or ptr/ref to those. */
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
Tnode292802* q0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
q0 = skipconv_328882_3876443242(n0);
{
Tmagic292524 LOC3;
Tloc292816 b0;
Tloc292816 c0;
Tnode292802* LOC6;
Tnode292802* LOC7;
Tnode292802* LOC8;
NimStringDesc* fmt0;
Ttype292840* LOC9;
TY535238 LOC25;
LOC3 = (Tmagic292524)0;
LOC3 = getmagic_318502_2616423590(q0);
if (!(LOC3 == ((Tmagic292524) 139))) goto LA4;
/* Slice case: sons 1..3 are the array, first index, last index. */
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&c0), 0, sizeof(c0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(q0, ((NI) 1));
initlocexpr_539283_839829468(p0, LOC6, (&a0));
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(q0, ((NI) 2));
initlocexpr_539283_839829468(p0, LOC7, (&b0));
LOC8 = (Tnode292802*)0;
LOC8 = HEX5BHEX5D_293238_850551059(q0, ((NI) 3));
initlocexpr_539283_839829468(p0, LOC8, (&c0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(a0.t, IL64(211106243062016));
switch ((*LOC9).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
fmt0 = copyString(((NimStringDesc*) &T839829468_486));
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* seq/string slice: the format depends on whether the target is an
   unchecked array under the old GC layout (gcmd check / module flag). */
NIM_BOOL LOC14;
Ttype292840* LOC15;
NIM_BOOL LOC17;
LOC14 = (NIM_BOOL)0;
LOC15 = (Ttype292840*)0;
LOC15 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC14 = ((*LOC15).kind == ((Ttypekind292244) 23));
if (!(LOC14)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC17) goto LA18;
LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA18: ;
LOC14 = !(LOC17);
LA16: ;
if (!LOC14) goto LA19;
fmt0 = copyString(((NimStringDesc*) &T839829468_487));
}
goto LA12;
LA19: ;
{
fmt0 = copyString(((NimStringDesc*) &T839829468_488));
}
LA12: ;
}
break;
default:
{
/* Unsupported sliced type: report an internal error but still emit
   a placeholder format so codegen can continue. */
NimStringDesc* LOC23;
NimStringDesc* LOC24;
LOC23 = (NimStringDesc*)0;
LOC24 = (NimStringDesc*)0;
LOC24 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC23 = rawNewString(LOC24->Sup.len + 14);
appendString(LOC23, ((NimStringDesc*) &T839829468_489));
appendString(LOC23, LOC24);
internalerror_196113_155036129(LOC23);
fmt0 = copyString(((NimStringDesc*) &T839829468_490));
}
break;
}
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468((&a0));
LOC25[1] = rdloc_538188_839829468((&b0));
LOC25[2] = rdloc_538188_839829468((&c0));
result0 = HEX25_178905_2381377266(fmt0, LOC25, 3);
}
goto LA1;
LA4: ;
{
/* Non-slice case: evaluate the whole expression and derive the length
   from its type kind. */
Ttype292840* LOC27;
initlocexpr_539283_839829468(p0, n0, (&a0));
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
switch ((*LOC27).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC29;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468((&a0));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* seq/string: length read via lenfield; format again depends on
   the GC-mode / module-flag layout check. */
NIM_BOOL LOC33;
Ttype292840* LOC34;
NIM_BOOL LOC36;
TY532811 LOC40;
LOC33 = (NIM_BOOL)0;
LOC34 = (Ttype292840*)0;
LOC34 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC33 = ((*LOC34).kind == ((Ttypekind292244) 23));
if (!(LOC33)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC36) goto LA37;
LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA37: ;
LOC33 = !(LOC36);
LA35: ;
if (!LOC33) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = rdloc_538188_839829468((&a0));
LOC40[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2);
}
goto LA31;
LA38: ;
{
TY532811 LOC42;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rdloc_538188_839829468((&a0));
LOC42[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2);
}
LA31: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* Fixed-size array: the length is a compile-time ordinal. */
TY532811 LOC44;
NI64 LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468((&a0));
LOC45 = (NI64)0;
LOC45 = lengthord_320007_3876443242(a0.t);
LOC44[1] = rope_178401_2381377266(LOC45);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
{
/* ptr/ref: dispatch again on the pointed-to (last son) type. */
Ttype292840* LOC47;
LOC47 = (Ttype292840*)0;
LOC47 = lastson_295377_850551059(a0.t);
switch ((*LOC47).kind) {
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC49;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&a0));
LOC49[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC51;
Ttype292840* LOC52;
NI64 LOC53;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468((&a0));
LOC52 = (Ttype292840*)0;
LOC52 = lastson_295377_850551059(a0.t);
LOC53 = (NI64)0;
LOC53 = lengthord_320007_3876443242(LOC52);
LOC51[1] = rope_178401_2381377266(LOC53);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2);
}
break;
default:
{
NimStringDesc* LOC55;
NimStringDesc* LOC56;
LOC55 = (NimStringDesc*)0;
LOC56 = (NimStringDesc*)0;
LOC56 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC55 = rawNewString(LOC56->Sup.len + 14);
appendString(LOC55, ((NimStringDesc*) &T839829468_489));
appendString(LOC55, LOC56);
internalerror_196113_155036129(LOC55);
}
break;
}
}
break;
default:
{
NimStringDesc* LOC58;
NimStringDesc* LOC59;
LOC58 = (NimStringDesc*)0;
LOC59 = (NimStringDesc*)0;
LOC59 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC58 = rawNewString(LOC59->Sup.len + 14);
appendString(LOC58, ((NimStringDesc*) &T839829468_489));
appendString(LOC58, LOC59);
internalerror_196113_155036129(LOC58);
}
break;
}
}
LA1: ;
return result0;
}
/* Render one call argument for formal parameter param0.  Four cases in
   priority order: (1) node kind 71 — string-to-cstring conversion;
   (2) the parameter's (skipped) type is in the openArray-family bitset —
   unwrap one level (kind 64) and use openarrayloc; (3) the backend
   introduced a by-pointer pass for this parameter — take the address of
   the evaluated loc; (4) otherwise a plain single-use evaluation, with a
   special sub-case that keeps the address for certain unchecked-array
   arguments when the callee symbol's flags/loc.flags match. */
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!((*n_539790_839829468).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n_539790_839829468);
}
goto LA1;
LA3: ;
{
Ttype292840* LOC6;
Tnode292802* n0;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*param0).typ, IL64(211106240964864));
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC6).kind)&63U)))!=0)) goto LA7;
{
/* If the argument is wrapped in a kind-64 node, pass its first son. */
if (!!(((*n_539790_839829468).kind == ((Tnodekind292020) 64)))) goto LA11;
n0 = n_539790_839829468;
}
goto LA9;
LA11: ;
{
n0 = (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)];
}
LA9: ;
result0 = openarrayloc_539665_839829468(p0, n0);
}
goto LA1;
LA7: ;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ccgintroducedptr_533611_839829468(param0);
if (!LOC15) goto LA16;
initlocexpr_539283_839829468(p0, n_539790_839829468, (&a0));
result0 = addrloc_538204_839829468((&a0));
}
goto LA1;
LA16: ;
{
/* Sub-case: (old GC mode OR module flag 27) AND parameter type is
   kind 23 AND the argument is a kind-64 wrapper. */
NIM_BOOL LOC19;
NIM_BOOL LOC20;
NIM_BOOL LOC21;
Tnode292802* callee0;
LOC19 = (NIM_BOOL)0;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC21) goto LA22;
LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC20 = ((*(*param0).typ).kind == ((Ttypekind292244) 23));
LA23: ;
LOC19 = LOC20;
if (!(LOC19)) goto LA24;
LOC19 = ((*n_539790_839829468).kind == ((Tnodekind292020) 64));
LA24: ;
if (!LOC19) goto LA25;
initlocexprsingleuse_539289_839829468(p0, (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0));
callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
{
/* Callee is a symbol whose flag mask matches and whose loc flags are
   set: pass the address; otherwise pass the plain value. */
NIM_BOOL LOC29;
NIM_BOOL LOC30;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*callee0).kind == ((Tnodekind292020) 3));
if (!(LOC30)) goto LA31;
LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32);
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0));
LA32: ;
if (!LOC29) goto LA33;
result0 = addrloc_538204_839829468((&a0));
}
goto LA27;
LA33: ;
{
result0 = rdloc_538188_839829468((&a0));
}
LA27: ;
}
goto LA1;
LA25: ;
{
/* Default: evaluate once and render the loc. */
initlocexprsingleuse_539289_839829468(p0, n_539790_839829468, (&a0));
result0 = rdloc_538188_839829468((&a0));
}
LA1: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Render a call argument for which no formal parameter symbol is
   available: kind-71 nodes go through the string-to-cstring helper,
   everything else is evaluated once and rendered directly. */
Ropeobj178006* result0;
Tloc292816 argLoc;
result0 = (Ropeobj178006*)0;
memset((void*)(&argLoc), 0, sizeof(argLoc));
if ((*n0).kind == ((Tnodekind292020) 71)) {
result0 = genargstringtocstring_539776_839829468(p0, n0);
} else {
initlocexprsingleuse_539289_839829468(p0, n0, (&argLoc));
result0 = rdloc_538188_839829468((&argLoc));
}
return result0;
}
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0) {
/* Thin wrapper: the raw proc-type rope for t0, i.e. the closure type
   with closure-kind 0. */
return getclosuretype_535685_839829468((*p0).module, t0, ((Tclosuretypekind535681) 0));
}
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0) {
/* True when the left-hand side le0 aliases (per ispartof) any argument
   of the right-hand-side call ri0 (sons 1 .. len-1).  A nil le0 never
   aliases anything. */
NI idx;
NI lastIdx;
if (!(le0 == NIM_NIL)) {
lastIdx = (len_293081_850551059(ri0) - 1);
for (idx = ((NI) 1); idx <= lastIdx; idx += ((NI) 1)) {
Tnode292802* arg = HEX5BHEX5D_293238_850551059(ri0, idx);
if (!(ispartof_473340_788060399(le0, arg) == ((Tanalysisresult473003) 0))) {
return NIM_TRUE;
}
}
}
return NIM_FALSE;
}
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0) {
/* True when the callee (son 0) is a symbol node (kind 3) whose symbol
   carries flag bit 12 — presumably the `noinit` pragma; verify against
   the Nim symflag enum. */
Tnode292802* callee = (*call0).kindU.S6.sons->data[((NI) 0)];
return (((*callee).kind == ((Tnodekind292020) 3)) &&
((((*(*callee).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)));
}
/* Emit code that resets (zero-initializes) the value held in loc0.
   Imported C++ types are left untouched.  Simple value types: GC'd refs
   are assigned nil via genrefassign, others get the scalar-reset format.
   Complex value types: optionally a taint/track call (option bit 6),
   then either a generic-reset call plus object re-init (for non-static
   storage) or a memset-based clear plus object re-init. */
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0) {
NIM_BOOL containsgcref0;
Ttype292840* typ0;
{ containsgcref0 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106242013440));
{
/* Imported C++ types manage their own initialization: nothing to do. */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedcpptype_533478_839829468(typ0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC8)) goto LA9;
{
/* Simple type containing a GC ref: assign nil through the write
   barrier. */
Tloc292816 nilloc0;
if (!containsgcref0) goto LA13;
memset((void*)(&nilloc0), 0, sizeof(nilloc0));
initloc_532273_839829468((&nilloc0), ((Tlockind292808) 1), (*loc0).t, ((Tstorageloc292812) 2));
nilloc0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
genrefassign_538311_839829468(p0, (&(*loc0)), (&nilloc0), 8);
}
goto LA11;
LA13: ;
{
/* Plain scalar: emit the simple zero-assignment format. */
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1);
}
LA11: ;
}
goto LA6;
LA9: ;
{
{
/* Complex value type; with option bit 6 set, emit an extra call on
   the address first. */
TY178507 LOC22;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 6))&31U)))!=0)) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = addrloc_538204_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1);
}
LA20: ;
{
/* Non-static storage: generic reset with the type's RTTI, then
   re-initialize object headers. */
TY532811 LOC27;
if (!!(((*loc0).s == ((Tstorageloc292812) 2)))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = addrloc_538204_839829468((&(*loc0)));
LOC27[1] = gentypeinfo_535941_839829468((*p0).module, (*loc0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE);
}
goto LA23;
LA25: ;
{
/* Static storage: clear with memset (needs <string.h>), then
   re-initialize object headers. */
TY532811 LOC29;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = addrloc_538204_839829468((&(*loc0)));
LOC29[1] = rdloc_538188_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE);
}
LA23: ;
}
LA6: ;
}BeforeRet: ;
}
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0) {
/* Append the separator rope (format T839829468_110 with no arguments)
   to r0; a nil rope is returned unchanged. */
TY533289 noArgs;
Ropeobj178006* sep;
if (r0 == NIM_NIL) {
return r0;
}
memset((void*)noArgs, 0, sizeof(noArgs));
sep = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), noArgs, 0);
return HEX26_178418_2381377266(r0, sep);
}
/* Generate a closure call: evaluate the callee (son 0) into op0, build
   the comma-separated argument rope pl0 (genarg for declared formal
   params, genargnoparam past the formal list), pick the call pattern by
   the proc type's flag bit 14, then emit by return-type shape:
   - invalid-by-value return type: pass the destination (or a fresh
     temp) by address as an extra trailing argument and emit a statement;
   - ordinary return type: format the call into an expression loc and
     assign it to the destination;
   - void: emit the call statement directly. */
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
Ttype292840* typ0;
NI length0;
Ropeobj178006* rawproc0;
NimStringDesc* callpattern0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
pl0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
/* Build the argument list pl0 from sons 1 .. length-1. */
NI i_540613_839829468;
NI HEX3Atmp_541214_839829468;
NI res_541217_839829468;
i_540613_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)(length0 - ((NI) 1));
res_541217_839829468 = ((NI) 1);
{
while (1) {
if (!(res_541217_839829468 <= HEX3Atmp_541214_839829468)) goto LA3;
i_540613_839829468 = res_541217_839829468;
{
/* Argument has a matching formal parameter in the proc type. */
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540613_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540613_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
/* Separator before every argument after the first. */
TY533289 LOC18;
Ropeobj178006* LOC19;
if (!!((pl0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&pl0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* Extra argument past the formal list (e.g. varargs). */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((pl0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&pl0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468]);
add_178482_2381377266(&pl0, LOC28);
}
LA4: ;
res_541217_839829468 += ((NI) 1);
} LA3: ;
}
}
rawproc0 = getrawproctype_540459_839829468(p0, typ0);
{
/* Pick the call pattern by proc-type flag bit 14. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 14))&31U)))!=0)) goto LA31;
callpattern0 = copyString(((NimStringDesc*) &T839829468_492));
}
goto LA29;
LA31: ;
{
callpattern0 = copyString(((NimStringDesc*) &T839829468_493));
}
LA29: ;
{
/* Non-void return type (sons[0] is the return type). */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36;
{
NIM_BOOL LOC40;
LOC40 = (NIM_BOOL)0;
LOC40 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC40) goto LA41;
{
/* Invalid-by-value return: add a separator if any args exist, then
   append the result address as a trailing argument. */
NI LOC45;
TY533289 LOC48;
Ropeobj178006* LOC49;
LOC45 = (NI)0;
LOC45 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC45)) goto LA46;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0);
add_178482_2381377266(&pl0, LOC49);
}
LA46: ;
{
/* Safe to write into d0 directly: d0 is already a temp-like loc, or
   the left side provably does not alias any argument. */
NIM_BOOL LOC52;
NIM_BOOL LOC54;
Ropeobj178006* LOC67;
NimStringDesc* LOC68;
TY535235 LOC69;
LOC52 = (NIM_BOOL)0;
LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC52) goto LA53;
LOC54 = (NIM_BOOL)0;
LOC54 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC52 = !(LOC54);
LA53: ;
if (!LOC52) goto LA55;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA59;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA57;
LA59: ;
{
/* Reset the destination first unless the callee is marked noinit. */
NIM_BOOL LOC62;
NIM_BOOL LOC64;
LOC62 = (NIM_BOOL)0;
LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC62)) goto LA63;
LOC64 = (NIM_BOOL)0;
LOC64 = hasnoinit_539383_839829468(ri0);
LOC62 = !(LOC64);
LA63: ;
if (!LOC62) goto LA65;
resetloc_538350_839829468(p0, d0);
}
goto LA57;
LA65: ;
LA57: ;
LOC67 = (Ropeobj178006*)0;
LOC67 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC67);
LOC68 = (NimStringDesc*)0;
LOC68 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC68, callpattern0);
appendString(LOC68, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = op0.r;
LOC69[1] = pl0;
LOC69[2] = addcomma_540464_839829468(pl0);
LOC69[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC68, LOC69, 4);
}
goto LA50;
LA55: ;
{
/* Possible aliasing: call into a fresh temp, then assign to d0. */
Tloc292816 tmp0;
Ropeobj178006* LOC71;
NimStringDesc* LOC72;
TY535235 LOC73;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC71 = (Ropeobj178006*)0;
LOC71 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC71);
LOC72 = (NimStringDesc*)0;
LOC72 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC72, callpattern0);
appendString(LOC72, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = op0.r;
LOC73[1] = pl0;
LOC73[2] = addcomma_540464_839829468(pl0);
LOC73[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC72, LOC73, 4);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA50: ;
}
goto LA38;
LA41: ;
{
/* Ordinary return type: format the call as an expression loc and
   assign it to the destination. */
Tloc292816 list0;
TY535235 LOC79;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA77;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA77: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
memset((void*)LOC79, 0, sizeof(LOC79));
LOC79[0] = op0.r;
LOC79[1] = pl0;
LOC79[2] = addcomma_540464_839829468(pl0);
LOC79[3] = rawproc0;
list0.r = HEX25_178905_2381377266(callpattern0, LOC79, 4);
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA38: ;
}
goto LA34;
LA36: ;
{
/* Void return: emit the call as a statement. */
NimStringDesc* LOC81;
TY535235 LOC82;
LOC81 = (NimStringDesc*)0;
LOC81 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC81, callpattern0);
appendString(LOC81, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC82, 0, sizeof(LOC82));
LOC82[0] = op0.r;
LOC82[1] = pl0;
LOC82[2] = addcomma_540464_839829468(pl0);
LOC82[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC81, LOC82, 4);
}
LA34: ;
}
/* Render argument i0 of call ri0 against proc type typ0 (used for
   non-closure call paths).  Within the formal list: compile-time-only
   parameters render as nil; a kind-23 parameter whose argument is a
   kind-64 wrapper is unwrapped one level; otherwise the argument is
   rendered directly.  Past the formal list: only allowed when the proc
   type has flag bit 0 (varargs-like), else a local error is reported. */
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
Tnode292802* paramtype0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
if (!(i0 < LOC3)) goto LA4;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0];
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!LOC8) goto LA9;
result0 = NIM_NIL;
}
goto LA6;
LA9: ;
{
/* kind-23 parameter with a kind-64 wrapped argument: pass son 0. */
NIM_BOOL LOC12;
Tnode292802* LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind292244) 23));
if (!(LOC12)) goto LA13;
LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 64));
LA13: ;
if (!LOC12) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC16);
}
goto LA6;
LA14: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA6: ;
}
goto LA1;
LA4: ;
{
{
/* Beyond the declared parameters: error unless flag bit 0 is set. */
if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0))) goto LA21;
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501));
result0 = NIM_NIL;
}
goto LA19;
LA21: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA19: ;
}
LA1: ;
return result0;
}
/* Collapse a redundant addr/deref pair around node0: an addr-like node
   (kind 63/64) over a deref-like node (kind 47/65), or vice versa,
   cancels out and yields the inner operand; a kind-66 (conversion-like)
   node in between is skipped.  Any other shape returns node0 unchanged. */
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0) {
Tnode292802* result0;
Tnode292802* n0;
NIM_BOOL isaddr0;
{ result0 = (Tnode292802*)0;
n0 = node0;
isaddr0 = NIM_FALSE;
switch ((*n0).kind) {
case ((Tnodekind292020) 63):
case ((Tnodekind292020) 64):
{
/* Outer node takes an address: remember and descend. */
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
isaddr0 = NIM_TRUE;
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
/* Outer node dereferences: descend. */
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
break;
default:
{
result0 = n0;
goto BeforeRet;
}
break;
}
{
/* Skip one intermediate kind-66 node if present. */
if (!((*n0).kind == ((Tnodekind292020) 66))) goto LA6;
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
LA6: ;
{
/* addr over deref: both cancel, yield the deref's operand. */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isaddr0;
if (!(LOC10)) goto LA11;
LOC10 = ((*n0).kind == ((Tnodekind292020) 47) || (*n0).kind == ((Tnodekind292020) 65));
LA11: ;
if (!LOC10) goto LA12;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA12: ;
{
/* deref over addr: both cancel, yield the addr's operand. */
if (!((*n0).kind == ((Tnodekind292020) 63) || (*n0).kind == ((Tnodekind292020) 64))) goto LA15;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA15: ;
{
/* No cancellation possible: return the original node. */
result0 = node0;
}
LA8: ;
}BeforeRet: ;
return result0;
}
/* Nim-compiler-generated C (Nim C backend; LAnn labels encode structured
 * if/elif chains).
 *
 * genthisarg: generate the rope for the `this`-style receiver argument of a
 * pattern call (used by `importcpp`-style bindings — TODO confirm), plus the
 * member-access operator text appended after it (one of two string constants,
 * T839829468_504 or T839829468_257; presumably "->" and "." or similar).
 *   p0                   - current C-proc
 *   ri_541478_839829468  - the call node
 *   i0                   - index of the receiver argument
 *   typ0                 - callee proc type (formals)
 * Reports an internal error if i0 is out of range of the formals. */
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
Tnode292802* ri0;
Ttype292840* t0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
NimStringDesc* LOC6;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
/* Guard: receiver index must name a declared formal. */
if (!!((i0 < LOC3))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_503);
internalerror_196113_155036129(LOC6);
}
LA4: ;
ri0 = HEX5BHEX5D_293238_850551059(ri_541478_839829468, i0);
{
/* Unwrap any chain of kind-66 wrapper nodes around the receiver. */
while (1) {
if (!((*ri0).kind == ((Tnodekind292020) 66))) goto LA8;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
} LA8: ;
}
t0 = skiptypes_296099_850551059((*typ0).sons->data[i0], 2048);
{
Tnode292802* x0;
/* Case 1: formal receiver type has kind 23. */
if (!((*t0).kind == ((Ttypekind292244) 23))) goto LA11;
{
if (!((*ri0).kind == ((Tnodekind292020) 64))) goto LA15;
x0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
goto LA13;
LA15: ;
{
x0 = ri0;
}
LA13: ;
{
if (!((*(*x0).typ).kind == ((Ttypekind292244) 21))) goto LA20;
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA20: ;
{
NIM_BOOL LOC23;
Tnode292802* LOC25;
Tnode292802* LOC28;
LOC23 = (NIM_BOOL)0;
/* x is itself a deref-like node (65/47) whose operand has type kind 21:
 * generate the operand instead, with the 504 operator. */
LOC23 = ((*x0).kind == ((Tnodekind292020) 65) || (*x0).kind == ((Tnodekind292020) 47));
if (!(LOC23)) goto LA24;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind292244) 21));
LA24: ;
if (!LOC23) goto LA26;
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC28);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA26: ;
{
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA18: ;
}
goto LA9;
LA11: ;
{
/* Case 2: formal receiver type has kind 21. */
if (!((*t0).kind == ((Ttypekind292244) 21))) goto LA31;
{
Tnode292802* LOC37;
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA35;
LOC37 = (Tnode292802*)0;
LOC37 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC37);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
goto LA33;
LA35: ;
{
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
LA33: ;
}
goto LA9;
LA31: ;
{
/* Case 3 (default): cancel addr/deref wrappers, then unwrap one more
 * addr-like layer if present, and use the 257 operator. */
ri0 = skipaddrderef_541433_839829468(ri0);
{
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA42;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
LA42: ;
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA9: ;
return result0;
}
/* Nim-compiler-generated C (Nim C backend).
 *
 * genpatterncall: interpret a call pattern string `pat0` (as used by Nim's
 * importcpp-style pattern language — TODO confirm) and build the resulting
 * C/C++ call text for call node `ri_541702_839829468` with callee type
 * `typ_541704_839829468`.  Recognized pattern characters (ASCII codes in the
 * switch): '@' (64) splices the remaining arguments, '#' (35) splices one
 * argument (with '+', '@', '.', '[' modifiers on the following character),
 * '\'' (39) expands a generic type slot; any other character is copied
 * literally.  i0 walks the pattern, j0 walks the call's arguments. */
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468) {
Ropeobj178006* result0;
NI i0;
NI j0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
j0 = ((NI) 1);
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2;
switch (((NU8)(pat0->data[i0]))) {
case 64:
{
/* '@': emit argument j0 followed by all remaining arguments,
 * separated by the T839829468_110 rope (presumably ", "). */
{
NI LOC6;
Ropeobj178006* LOC9;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri_541702_839829468);
if (!(j0 < LOC6)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC9);
{
NI k_541728_839829468;
NI HEX3Atmp_541904_839829468;
NI HEX3Atmp_541905_839829468;
NI LOC11;
NI res_541908_839829468;
k_541728_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)0;
HEX3Atmp_541905_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)(j0 + ((NI) 1));
LOC11 = (NI)0;
LOC11 = len_293081_850551059(ri_541702_839829468);
HEX3Atmp_541905_839829468 = (LOC11 - 1);
res_541908_839829468 = HEX3Atmp_541904_839829468;
{
/* for k in j0+1 .. len(ri)-1: emit separator + argument k. */
while (1) {
TY533289 LOC14;
Ropeobj178006* LOC15;
Ropeobj178006* LOC16;
if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA13;
k_541728_839829468 = res_541908_839829468;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0);
add_178482_2381377266(&result0, LOC15);
LOC16 = (Ropeobj178006*)0;
LOC16 = genotherarg_539277_839829468(p0, ri_541702_839829468, k_541728_839829468, typ_541704_839829468);
add_178482_2381377266(&result0, LOC16);
res_541908_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA7: ;
i0 += ((NI) 1);
}
break;
case 35:
{
/* '#': emit one argument.  The NEXT pattern character selects a
 * sub-mode; note pat0->data[i0+1] relies on the NUL terminator when
 * '#' is last. */
{
Tnode292802* ri0;
/* '#+' / '#@': the argument must be a call-like node (kinds 26-32);
 * emit its callee (for '+') then its own arguments parenthesized. */
if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20;
ri0 = HEX5BHEX5D_293238_850551059(ri_541702_839829468, j0);
{
Ttype292840* typ0;
TY533289 LOC31;
Ropeobj178006* LOC32;
TY533289 LOC46;
Ropeobj178006* LOC47;
if (!((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32))) goto LA24;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
Ropeobj178006* LOC30;
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]);
add_178482_2381377266(&result0, LOC30);
}
LA28: ;
/* Open paren (T839829468_118), then the nested call's args. */
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0);
add_178482_2381377266(&result0, LOC32);
{
NI LOC35;
Ropeobj178006* LOC38;
LOC35 = (NI)0;
LOC35 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC35)) goto LA36;
LOC38 = (Ropeobj178006*)0;
LOC38 = genotherarg_539277_839829468(p0, ri0, ((NI) 1), typ0);
add_178482_2381377266(&result0, LOC38);
}
LA36: ;
{
NI k_541793_839829468;
NI HEX3Atmp_541915_839829468;
NI HEX3Atmp_541916_839829468;
NI LOC40;
NI res_541919_839829468;
k_541793_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)0;
HEX3Atmp_541916_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)(j0 + ((NI) 1));
LOC40 = (NI)0;
LOC40 = len_293081_850551059(ri0);
HEX3Atmp_541916_839829468 = (LOC40 - 1);
res_541919_839829468 = HEX3Atmp_541915_839829468;
{
/* for k in j0+1 .. len(ri0)-1: separator + argument k. */
while (1) {
TY533289 LOC43;
Ropeobj178006* LOC44;
Ropeobj178006* LOC45;
if (!(res_541919_839829468 <= HEX3Atmp_541916_839829468)) goto LA42;
k_541793_839829468 = res_541919_839829468;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC44 = (Ropeobj178006*)0;
LOC44 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0);
add_178482_2381377266(&result0, LOC44);
LOC45 = (Ropeobj178006*)0;
LOC45 = genotherarg_539277_839829468(p0, ri0, k_541793_839829468, typ0);
add_178482_2381377266(&result0, LOC45);
res_541919_839829468 += ((NI) 1);
} LA42: ;
}
}
/* Closing paren (T839829468_117). */
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (Ropeobj178006*)0;
LOC47 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0);
add_178482_2381377266(&result0, LOC47);
}
goto LA22;
LA24: ;
{
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502));
}
LA22: ;
i0 += ((NI) 1);
}
goto LA18;
LA20: ;
{
/* '#.': emit as the receiver ("this") argument. */
Ropeobj178006* LOC52;
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50;
LOC52 = (Ropeobj178006*)0;
LOC52 = genthisarg_541475_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC52);
i0 += ((NI) 1);
}
goto LA18;
LA50: ;
{
/* '#[': strip addr/deref and wrapper layers from the argument,
 * then emit it plain. */
Tnode292802* arg0;
Ropeobj178006* LOC58;
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54;
arg0 = skipaddrderef_541433_839829468((*ri_541702_839829468).kindU.S6.sons->data[j0]);
{
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 63) || (*arg0).kind == ((Tnodekind292020) 64) || (*arg0).kind == ((Tnodekind292020) 66))) goto LA57;
arg0 = HEX5BHEX5D_293238_850551059(arg0, ((NI) 0));
} LA57: ;
}
LOC58 = (Ropeobj178006*)0;
LOC58 = genargnoparam_539938_839829468(p0, arg0);
add_178482_2381377266(&result0, LOC58);
}
goto LA18;
LA54: ;
{
/* Plain '#': emit argument j0 as-is. */
Ropeobj178006* LOC60;
LOC60 = (Ropeobj178006*)0;
LOC60 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC60);
}
LA18: ;
j0 += ((NI) 1);
i0 += ((NI) 1);
}
break;
case 39:
{
/* '\'': C++ generic slot — scan "'N*..." notation and emit the
 * resolved type's C name (or the T839829468_26 rope if the slot
 * resolves to nil). */
NI idx0;
NI stars0;
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC64;
Ttype292840* t0;
LOC64 = (NIM_BOOL)0;
LOC64 = scancppgenericslot_534827_839829468(pat0, (&i0), (&idx0), (&stars0));
if (!LOC64) goto LA65;
t0 = resolvestarsincpptype_534891_839829468(typ_541704_839829468, idx0, stars0);
{
TY533289 LOC71;
Ropeobj178006* LOC72;
if (!(t0 == NIM_NIL)) goto LA69;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0);
add_178482_2381377266(&result0, LOC72);
}
goto LA67;
LA69: ;
{
Ropeobj178006* LOC74;
LOC74 = (Ropeobj178006*)0;
LOC74 = gettypedesc_535673_839829468((*p0).module, t0);
add_178482_2381377266(&result0, LOC74);
}
LA67: ;
}
LA65: ;
}
break;
default:
{
/* Literal run: copy characters up to the next '@', '#' or '\''. */
NI start0;
start0 = i0;
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77;
{
if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80;
i0 += ((NI) 1);
}
goto LA78;
LA80: ;
{
goto LA76;
}
LA78: ;
} LA77: ;
} LA76: ;
{
NimStringDesc* LOC87;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85;
LOC87 = (NimStringDesc*)0;
LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC87);
}
LA85: ;
}
break;
}
} LA2: ;
}
return result0;
}
/* Nim-compiler-generated C (Nim C backend).
 *
 * fixupcall: finish generating a call whose callee text (`callee0`) and
 * argument list (`params0`) are already built as ropes.  Decides how the
 * call's result is stored:
 *   - procs with an "invalid return type" (isinvalidreturntype) get the
 *     destination passed as an extra out-parameter (address of d0 or of a
 *     fresh temporary, then assigned back);
 *   - normal value returns either bind the call expression directly into d0
 *     (when a flag on d0 permits expression-mode) or assign it via a
 *     synthetic expression loc;
 *   - void calls are emitted as plain statements.
 *   le0 - left-hand side node of an enclosing assignment (may alias check)
 *   ri0 - the call node; its son 0 carries the callee's proc type
 *   d0  - destination location descriptor (may be initialized here)
 * NOTE(review): the meanings of T839829468_xxx rope constants (separators,
 * parens, ");\n" etc.) are not visible in this chunk. */
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0) {
Ropeobj178006* pl0;
TY533289 LOC1;
Ropeobj178006* LOC2;
Ropeobj178006* LOC3;
Ttype292840* typ0;
/* pl0 = callee0 & "(" & params0 */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX26_178418_2381377266(callee0, LOC2);
pl0 = HEX26_178418_2381377266(LOC3, params0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* Non-void return type? (sons[0] of a proc type is the return type.) */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC10) goto LA11;
{
/* Extra out-parameter path: append separator if params exist. */
TY533289 LOC17;
Ropeobj178006* LOC18;
if (!!((params0 == NIM_NIL))) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0);
add_178482_2381377266(&pl0, LOC18);
}
LA15: ;
{
NIM_BOOL LOC21;
NIM_BOOL LOC23;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC21 = (NIM_BOOL)0;
/* d0 is a "simple" loc kind (bitset 3) OR the LHS does not appear
 * on the RHS — then d0's address can be passed directly. */
LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC21) goto LA22;
LOC23 = (NIM_BOOL)0;
LOC23 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC21 = !(LOC23);
LA22: ;
if (!LOC21) goto LA24;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA28;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA26;
LA28: ;
{
NIM_BOOL LOC31;
NIM_BOOL LOC33;
LOC31 = (NIM_BOOL)0;
/* Reset the destination before the call unless its kind is in
 * bitset 66 or the callee is marked no-init. */
LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC31)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = hasnoinit_539383_839829468(ri0);
LOC31 = !(LOC33);
LA32: ;
if (!LOC31) goto LA34;
resetloc_538350_839829468(p0, d0);
}
goto LA26;
LA34: ;
LA26: ;
LOC36 = (Ropeobj178006*)0;
LOC36 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0);
add_178482_2381377266(&pl0, LOC38);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA19;
LA24: ;
{
/* Destination may alias the call: route through a fresh temp,
 * then assign it into d0. */
Tloc292816 tmp0;
Ropeobj178006* LOC40;
TY533289 LOC41;
Ropeobj178006* LOC42;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC40 = (Ropeobj178006*)0;
LOC40 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC40);
memset((void*)LOC41, 0, sizeof(LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0);
add_178482_2381377266(&pl0, LOC42);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA19: ;
}
goto LA8;
LA11: ;
{
/* Normal value return: close the call with ")" (T839829468_117). */
TY533289 LOC44;
Ropeobj178006* LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
{
NIM_BOOL LOC48;
NIM_BOOL LOC49;
LOC48 = (NIM_BOOL)0;
LOC49 = (NIM_BOOL)0;
/* Expression mode: allowed when compiling for command kind 2 or
 * the module's symbol flag 27 is set, AND d0's loc flag 8 is set. */
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
LOC48 = LOC49;
if (!(LOC48)) goto LA51;
LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA51: ;
if (!LOC48) goto LA52;
/* Bind the call expression itself into d0 (loc kind 9 = expr). */
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA46;
LA52: ;
{
/* Statement mode: materialize d0 if needed and assign the call
 * expression to it via a synthetic expression loc. */
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA57;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA57: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA46: ;
}
LA8: ;
}
goto LA4;
LA6: ;
{
/* Void call: close with T839829468_505 and emit as a statement. */
TY533289 LOC60;
Ropeobj178006* LOC61;
memset((void*)LOC60, 0, sizeof(LOC60));
LOC61 = (Ropeobj178006*)0;
LOC61 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0);
add_178482_2381377266(&pl0, LOC61);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA4: ;
}
/* Nim-compiler-generated C (Nim C backend).
 *
 * geninfixcall: generate a call to an imported (importcpp-style — TODO
 * confirm) routine.  The callee symbol's loc rope holds a pattern string
 * `pat0`; if it contains the T839829468_500 character set, the full pattern
 * interpreter (genpatterncall) builds the call text and the result is bound
 * or assigned like in fixupcall; otherwise the call is rendered as
 * `thisarg op (arg2, ...)` and finished by fixupcall.
 * Reports an internal error when the callee has no pattern at all.
 * NOTE(review): some `&params0` occurrences below appear in this chunk as
 * the mojibake `¶ms0` — preserved byte-for-byte here; verify against the
 * upstream generated file. */
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ttype292840* typ_541940_839829468;
NI length0;
NimStringDesc* pat0;
memset((void*)(&op0), 0, sizeof(op0));
/* op0 = loc of the callee expression (son 0 of the call). */
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
typ_541940_839829468 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC5;
if (!!(!((pat0 == NIM_NIL)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_498);
internalerror_196113_155036129(LOC5);
}
LA3: ;
{
NIM_BOOL LOC8;
Ropeobj178006* pl0;
Ttype292840* typ0;
LOC8 = (NIM_BOOL)0;
/* Pattern path: pat0 contains one of the pattern metacharacters. */
LOC8 = contains_110056_4286263276(pat0, T839829468_500);
if (!LOC8) goto LA9;
pl0 = genpatterncall_541699_839829468(p0, ri0, pat0, typ_541940_839829468);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
/* Same expression-mode test as in fixupcall: command kind 2 or
 * module symbol flag 27, AND loc flag 8 on d0. */
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA15;
LA21: ;
{
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA26;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA26: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* Void pattern call: terminate with T839829468_497 and emit. */
TY533289 LOC29;
Ropeobj178006* LOC30;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC30 = (Ropeobj178006*)0;
LOC30 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0);
add_178482_2381377266(&pl0, LOC30);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA11: ;
}
goto LA6;
LA9: ;
{
/* Infix path without pattern metacharacters:
 * pl0 = thisarg(arg 1) & op; params0 = args 2..length-1. */
Ropeobj178006* pl0;
Ropeobj178006* params0;
pl0 = NIM_NIL;
{
NI LOC34;
Ropeobj178006* LOC37;
LOC34 = (NI)0;
LOC34 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC34)) goto LA35;
LOC37 = (Ropeobj178006*)0;
LOC37 = genthisarg_541475_839829468(p0, ri0, ((NI) 1), typ_541940_839829468);
add_178482_2381377266(&pl0, LOC37);
}
LA35: ;
add_178482_2381377266(&pl0, op0.r);
params0 = (Ropeobj178006*)0;
{
NI i_542425_839829468;
NI HEX3Atmp_542609_839829468;
NI res_542612_839829468;
i_542425_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)(length0 - ((NI) 1));
res_542612_839829468 = ((NI) 2);
{
/* for i in 2 .. length-1: comma-separate and append argument i. */
while (1) {
Ropeobj178006* LOC47;
if (!(res_542612_839829468 <= HEX3Atmp_542609_839829468)) goto LA40;
i_542425_839829468 = res_542612_839829468;
{
TY533289 LOC45;
Ropeobj178006* LOC46;
if (!!((params0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0);
add_178482_2381377266(¶ms0, LOC46);
}
LA43: ;
LOC47 = (Ropeobj178006*)0;
LOC47 = genotherarg_539277_839829468(p0, ri0, i_542425_839829468, typ_541940_839829468);
add_178482_2381377266(¶ms0, LOC47);
res_542612_839829468 += ((NI) 1);
} LA40: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, pl0, params0);
}
LA6: ;
}
/* Nim-compiler-generated C (Nim C backend).
 *
 * gennamedparamcall: generate an Objective-C-style message send with named
 * parameters (presumably for importobjc routines — TODO confirm).  Builds
 * `pl0` starting from the T839829468_506 rope, then either
 *   - pattern contains a space (ASCII 32): `op arg1` form, remaining named
 *     args start at index 2; or
 *   - otherwise: `arg1 op arg2` form, remaining named args start at 3.
 * Each remaining argument i is appended as ` paramname: value`.  Result
 * handling mirrors fixupcall: invalid return types get the destination
 * passed by address; normal returns are assigned via a synthetic loc; void
 * calls are emitted as statements. */
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
TY533289 LOC1;
Ttype292840* typ0;
NI length0;
NimStringDesc* pat0;
NI start0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
memset((void*)LOC1, 0, sizeof(LOC1));
pl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC6;
/* The callee symbol must carry a pattern string. */
if (!!(!((pat0 == NIM_NIL)))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_507);
internalerror_196113_155036129(LOC6);
}
LA4: ;
start0 = ((NI) 3);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* Pattern contains a space: `op arg1` message form. */
LOC9 = contains_110046_4286263276(pat0, 32);
if (!LOC9) goto LA10;
start0 = ((NI) 1);
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
if (!(((NI) 1) < length0)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0);
add_178482_2381377266(&pl0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC18);
start0 = ((NI) 2);
}
LA14: ;
}
goto LA7;
LA10: ;
{
/* No space in pattern: `arg1 op arg2` form. */
{
Ropeobj178006* LOC24;
TY533289 LOC25;
Ropeobj178006* LOC26;
if (!(((NI) 1) < length0)) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC24);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC26 = (Ropeobj178006*)0;
LOC26 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0);
add_178482_2381377266(&pl0, LOC26);
}
LA22: ;
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC31;
Ropeobj178006* LOC32;
Ropeobj178006* LOC33;
if (!(((NI) 2) < length0)) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0);
add_178482_2381377266(&pl0, LOC32);
LOC33 = (Ropeobj178006*)0;
LOC33 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC33);
}
LA29: ;
}
LA7: ;
{
NI i_543051_839829468;
NI HEX3Atmp_543617_839829468;
NI res_543620_839829468;
i_543051_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)(length0 - ((NI) 1));
res_543620_839829468 = start0;
{
/* for i in start0 .. length-1: append " name: value" for formal i. */
while (1) {
Tsym292834* param0;
TY533289 LOC42;
Ropeobj178006* LOC43;
TY533289 LOC44;
Ropeobj178006* LOC45;
Ropeobj178006* LOC46;
if (!(res_543620_839829468 <= HEX3Atmp_543617_839829468)) goto LA36;
i_543051_839829468 = res_543620_839829468;
{
NI LOC39;
LOC39 = (NI)0;
LOC39 = sonslen_295327_850551059(typ0);
/* More actual args than declared formals is a compiler bug here. */
if (!(LOC39 <= i_543051_839829468)) goto LA40;
internalerror_196100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508));
}
LA40: ;
param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_543051_839829468]).kindU.S4.sym;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC43 = (Ropeobj178006*)0;
LOC43 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0);
add_178482_2381377266(&pl0, LOC43);
add_178487_2381377266(&pl0, (*(*param0).name).s);
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
LOC46 = (Ropeobj178006*)0;
LOC46 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_543051_839829468], param0, ri0);
add_178482_2381377266(&pl0, LOC46);
res_543620_839829468 += ((NI) 1);
} LA36: ;
}
}
{
/* Non-void return type? */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC53) goto LA54;
{
/* Out-parameter path: separator if there were any args. */
NI LOC58;
TY533289 LOC61;
Ropeobj178006* LOC62;
LOC58 = (NI)0;
LOC58 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC58)) goto LA59;
memset((void*)LOC61, 0, sizeof(LOC61));
LOC62 = (Ropeobj178006*)0;
LOC62 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0);
add_178482_2381377266(&pl0, LOC62);
}
LA59: ;
{
TY533289 LOC71;
Ropeobj178006* LOC72;
Ropeobj178006* LOC73;
TY533289 LOC74;
Ropeobj178006* LOC75;
/* d0 kind in bitset 3: pass d0's address directly. */
if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA69;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
LA69: ;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0);
add_178482_2381377266(&pl0, LOC72);
LOC73 = (Ropeobj178006*)0;
LOC73 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC73);
memset((void*)LOC74, 0, sizeof(LOC74));
LOC75 = (Ropeobj178006*)0;
LOC75 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0);
add_178482_2381377266(&pl0, LOC75);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA63;
LA65: ;
{
/* Otherwise route through a fresh temp and assign into d0. */
Tloc292816 tmp0;
Ropeobj178006* LOC77;
TY533289 LOC78;
Ropeobj178006* LOC79;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC77 = (Ropeobj178006*)0;
LOC77 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC77);
memset((void*)LOC78, 0, sizeof(LOC78));
LOC79 = (Ropeobj178006*)0;
LOC79 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0);
add_178482_2381377266(&pl0, LOC79);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA63: ;
}
goto LA51;
LA54: ;
{
/* Normal value return: close with T839829468_511 and assign the
 * whole expression into d0 via a synthetic expression loc. */
TY533289 LOC81;
Ropeobj178006* LOC82;
Tloc292816 list0;
memset((void*)LOC81, 0, sizeof(LOC81));
LOC82 = (Ropeobj178006*)0;
LOC82 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0);
add_178482_2381377266(&pl0, LOC82);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA85;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA85: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), NIM_NIL, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA51: ;
}
goto LA47;
LA49: ;
{
/* Void call: close with T839829468_510 and emit as a statement. */
TY533289 LOC88;
Ropeobj178006* LOC89;
memset((void*)LOC88, 0, sizeof(LOC88));
LOC89 = (Ropeobj178006*)0;
LOC89 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0);
add_178482_2381377266(&pl0, LOC89);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA47: ;
}
/* Nim-compiler-generated C (Nim C backend).
 *
 * genprefixcall: generate an ordinary `callee(arg1, arg2, ...)` call.
 * Collects each argument 1..length-1 into `params0`, comma-separated
 * (T839829468_110), skipping compile-time-only formals; arguments beyond
 * the declared formals (variadic tail) are generated without a formal.
 * fixupcall then handles result placement and emission.
 * NOTE(review): several `&params0` occurrences appear in this chunk as the
 * mojibake `¶ms0` — preserved byte-for-byte; verify against the upstream
 * generated file. */
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* params0;
Ttype292840* typ0;
NI length0;
memset((void*)(&op0), 0, sizeof(op0));
/* op0 = loc of the callee expression (son 0 of the call). */
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
NI i_540213_839829468;
NI HEX3Atmp_540445_839829468;
NI res_540448_839829468;
i_540213_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)(length0 - ((NI) 1));
res_540448_839829468 = ((NI) 1);
{
/* for i in 1 .. length-1: append argument i. */
while (1) {
if (!(res_540448_839829468 <= HEX3Atmp_540445_839829468)) goto LA3;
i_540213_839829468 = res_540448_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
/* i names a declared formal. */
if (!(i_540213_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540213_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
/* Compile-time-only formals produce no runtime argument. */
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
if (!!((params0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(¶ms0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(¶ms0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* Variadic tail beyond the declared formals. */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((params0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(¶ms0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468]);
add_178482_2381377266(¶ms0, LOC28);
}
LA4: ;
res_540448_839829468 += ((NI) 1);
} LA3: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, op0.r, params0);
}
/* Nim-compiler-generated C (cgen output). After each generated statement,
   appends the module's `injectstmt` rope into the proc's statement section
   (Tcprocsection 2). Do not hand-edit: regenerate from the Nim sources. */
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0) {
Ropeobj178006** LOC1;
LOC1 = (Ropeobj178006**)0;
LOC1 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC1, (*(*p0).module).injectstmt);
}
/* Generated code-generator entry for a call expression `e0` whose result goes
   into location `d0`. Dispatches on the callee (son 0):
   - closure calling convention (callconv == 8)  -> genclosurecall
   - symbol with flag bit 27 set                 -> geninfixcall
   - symbol with flag bit 28 set                 -> gennamedparamcall
   - otherwise                                   -> genprefixcall
   (flag-bit meanings inferred from the callee names — confirm against the
   Nim compiler's Tsymflag enum). Finally runs poststmtactions. */
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Ttype292840* LOC3;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
genclosurecall_540452_839829468(p0, NIM_NIL, e0, d0);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
geninfixcall_541929_839829468(p0, NIM_NIL, e0, d0);
}
goto LA1;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC12)) goto LA13;
LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
gennamedparamcall_542616_839829468(p0, e0, d0);
}
goto LA1;
LA14: ;
{
/* default: ordinary prefix call */
genprefixcall_539960_839829468(p0, NIM_NIL, e0, d0);
}
LA1: ;
poststmtactions_532942_839829468(p0);
}
/* Generates code for a `reset(x)` call: evaluates the argument (son 1) into a
   loc, then emits a formatted runtime call (template T839829468_496) taking
   the address of the loc and the RTTI for its (skipped) type. */
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
TY532811 LOC1;
Ttype292840* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = addrloc_538204_839829468((&a0));
LOC2 = (Ttype292840*)0;
LOC2 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
LOC1[1] = gentypeinfo_535941_839829468((*p0).module, LOC2);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC1, 2);
}
/* Generates code for `echo`: expects `n0` of node kind 41 (internal error
   otherwise), registers a header file (T839829468_513) on the module, builds
   a printf-style argument rope for each echoed operand, then emits the
   formatted print call (T839829468_516) with a format string of repeated
   T839829468_517 placeholders plus the platform newline, followed by a
   trailing statement (T839829468_518 — presumably a flush; confirm). */
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NIM_BOOL LOC6;
Ropeobj178006* args0;
Tloc292816 a0;
TY532811 LOC18;
NimStringDesc* LOC19;
NI LOC20;
NimStringDesc* LOC21;
TY533289 LOC22;
{
/* defensive check on the node kind; raises internal error if violated */
NimStringDesc* LOC5;
if (!!(((*n0).kind == ((Tnodekind292020) 41)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_512);
internalerror_196113_155036129(LOC5);
}
LA3: ;
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513));
args0 = NIM_NIL;
memset((void*)(&a0), 0, sizeof(a0));
{
/* for each operand: nil-kind (23) operands get the literal T839829468_514,
   others are evaluated and formatted via T839829468_515 */
NI i_554404_839829468;
NI HEX3Atmp_554431_839829468;
NI LOC8;
NI res_554434_839829468;
i_554404_839829468 = (NI)0;
HEX3Atmp_554431_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
HEX3Atmp_554431_839829468 = (NI)(LOC8 - ((NI) 1));
res_554434_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554434_839829468 <= HEX3Atmp_554431_839829468)) goto LA10;
i_554404_839829468 = res_554434_839829468;
{
Tnode292802* LOC13;
LOC13 = (Tnode292802*)0;
LOC13 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[i_554404_839829468]);
if (!((*LOC13).kind == ((Tnodekind292020) 23))) goto LA14;
add_178487_2381377266(&args0, ((NimStringDesc*) &T839829468_514));
}
goto LA11;
LA14: ;
{
TY178507 LOC17;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[i_554404_839829468], (&a0));
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&a0));
addf_179205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1);
}
LA11: ;
res_554434_839829468 += ((NI) 1);
} LA10: ;
}
}
/* format string: one T839829468_517 per operand, then the newline rope */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (NimStringDesc*)0;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(n0);
LOC21 = (NimStringDesc*)0;
LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20)));
LOC19 = rawNewString(LOC21->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC19, LOC21);
appendString(LOC19, tnl_176644_4151366050);
LOC18[0] = makecstring_191638_155036129(LOC19);
LOC18[1] = args0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2);
memset((void*)LOC22, 0, sizeof(LOC22));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0);
}
/* Generates code for a seq constructor `t0` into `d0`: allocates a temp if
   the destination loc is empty (kind 0), emits the new-seq allocation with
   the son count as length, then generates each element expression into the
   indexed destination slot (template T839829468_187). */
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 arr0;
NI LOC5;
Ropeobj178006* LOC6;
memset((void*)(&arr0), 0, sizeof(arr0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA3: ;
LOC5 = (NI)0;
LOC5 = sonslen_295351_850551059(t0);
LOC6 = (Ropeobj178006*)0;
LOC6 = intliteral_539270_839829468(((NI64) (LOC5)));
gennewseqaux_554795_839829468(p0, (&(*d0)), LOC6);
{
NI i_555031_839829468;
NI HEX3Atmp_555039_839829468;
NI LOC8;
NI res_555042_839829468;
i_555031_839829468 = (NI)0;
HEX3Atmp_555039_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = sonslen_295351_850551059(t0);
HEX3Atmp_555039_839829468 = (NI)(LOC8 - ((NI) 1));
res_555042_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC11;
Ttype292840* LOC12;
TY532811 LOC13;
if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA10;
i_555031_839829468 = res_555042_839829468;
/* element loc: d[i] with the seq's element type, storage kind 3 */
LOC11 = (Ttype292840*)0;
LOC11 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC12 = (Ttype292840*)0;
LOC12 = elemtype_320394_3876443242(LOC11);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC12, ((Tstorageloc292812) 3));
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468((&(*d0)));
LOC13[1] = intliteral_539270_839829468(((NI64) (i_555031_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2);
arr0.s = ((Tstorageloc292812) 3);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[i_555031_839829468], (&arr0));
res_555042_839829468 += ((NI) 1);
} LA10: ;
}
}
gcusage_554439_839829468(t0);
}
/* Generates code for an array-to-seq conversion `t0` into `d0`.
   Fast path: if the operand (son 1) is itself a constructor node (kind 41),
   retype it to the seq type and delegate to genseqconstr. Otherwise allocate
   a new seq of the array's length and assign each element d[i] = a[i]
   (templates T839829468_187 for seq indexing, T839829468_138 for array). */
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 elem0;
Tloc292816 a0;
Tloc292816 arr0;
NI L0;
NI64 LOC9;
Ropeobj178006* LOC10;
{ memset((void*)(&elem0), 0, sizeof(elem0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&arr0), 0, sizeof(arr0));
{
if (!((*t0).kind == ((Tnodekind292020) 41))) goto LA3;
asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ);
genseqconstr_555004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0);
goto BeforeRet;
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA7: ;
/* L0 = ordinal length of the source array type */
LOC9 = (NI64)0;
LOC9 = lengthord_320007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ);
L0 = ((NI) (LOC9));
LOC10 = (Ropeobj178006*)0;
LOC10 = intliteral_539270_839829468(((NI64) (L0)));
gennewseqaux_554795_839829468(p0, (&(*d0)), LOC10);
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI i_555090_839829468;
NI HEX3Atmp_555104_839829468;
NI res_555107_839829468;
i_555090_839829468 = (NI)0;
HEX3Atmp_555104_839829468 = (NI)0;
HEX3Atmp_555104_839829468 = (NI)(L0 - ((NI) 1));
res_555107_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
Ttype292840* LOC17;
Ttype292840* LOC18;
TY532811 LOC19;
if (!(res_555107_839829468 <= HEX3Atmp_555104_839829468)) goto LA13;
i_555090_839829468 = res_555107_839829468;
/* destination element loc: d[i] */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&elem0), ((Tlockind292808) 6), LOC15, ((Tstorageloc292812) 3));
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
elem0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2);
elem0.s = ((Tstorageloc292812) 3);
/* source element loc: a[i] */
LOC17 = (Ttype292840*)0;
LOC17 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256));
LOC18 = (Ttype292840*)0;
LOC18 = elemtype_320394_3876443242(LOC17);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC18, a0.s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&a0));
LOC19[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2);
genassignment_539264_839829468(p0, (&elem0), (&arr0), 3);
res_555107_839829468 += ((NI) 1);
} LA13: ;
}
}
}BeforeRet: ;
}
/* Generates a deep-copy from `src0` to `dest0`, switching on the destination
   type kind and emitting the matching runtime-call template:
   - ref-like/complex kinds   -> T839829468_519 (by address + RTTI)
   - seq/string-like kinds    -> T839829468_520 (addr dest, value src, RTTI)
   - other RTTI-bearing kinds -> T839829468_521
   - kind 19: memcpy (T839829468_268) when its mapped C type is an array
     (ctype 17), else plain assignment (T839829468_123)
   - scalar kinds             -> plain assignment (T839829468_123)
   - anything else            -> internal error with the type-kind name.
   (Exact Ttypekind enum values are opaque here — confirm against ast.nim.) */
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
switch ((*ty0).kind) {
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 25):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY535238 LOC2;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = addrloc_538204_839829468(dest0);
LOC2[1] = addrloc_538204_839829468(src0);
LOC2[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
TY535238 LOC4;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = addrloc_538204_839829468(dest0);
LOC4[1] = rdloc_538188_839829468(src0);
LOC4[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY535238 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = addrloc_538204_839829468(dest0);
LOC6[1] = addrloc_538204_839829468(src0);
LOC6[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3);
}
break;
case ((Ttypekind292244) 19):
{
{
Tctypekind529007 LOC10;
TY535238 LOC13;
NI64 LOC14;
LOC10 = (Tctypekind529007)0;
LOC10 = maptype_533394_839829468(ty0);
if (!(LOC10 == ((Tctypekind529007) 17))) goto LA11;
/* array representation: copy bytes with memcpy; needs <string.h> */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468(dest0);
LOC13[1] = rdloc_538188_839829468(src0);
LOC14 = (NI64)0;
LOC14 = getsize_320135_3876443242((*dest0).t);
LOC13[2] = rope_178401_2381377266(LOC14);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3);
}
goto LA8;
LA11: ;
{
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(dest0);
LOC16[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2);
}
LA8: ;
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(dest0);
LOC18[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2);
}
break;
default:
{
/* unsupported type kind: internal error "<prefix> <enum name>" */
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC20, ((NimStringDesc*) &T839829468_522));
appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC20);
}
break;
}
}
/* Central dispatcher for Nim "magic" operations: switches on the magic id
   `op0` and delegates to the specialized generator for expression `e0` with
   destination `d0`. The numeric Tmagic values are opaque in this generated
   file; the callee names (genandor, unaryarith, binaryarithoverflow, ...)
   indicate each group's purpose. Unknown magics raise an internal error. */
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
switch (op0) {
case ((Tmagic292524) 127):
case ((Tmagic292524) 126):
{
/* short-circuit `and`/`or` */
genandor_554311_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 99) ... ((Tmagic292524) 117):
{
unaryarith_552646_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 96) ... ((Tmagic292524) 98):
{
unaryarithoverflow_551633_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 52) ... ((Tmagic292524) 55):
{
binaryfloatarith_556729_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 56) ... ((Tmagic292524) 93):
{
binaryarith_551819_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 95):
{
geneqproc_552214_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 45) ... ((Tmagic292524) 51):
{
binaryarithoverflow_551262_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 149):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 259):
{
gengettypeinfo_555383_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 156):
{
genswap_555638_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 25):
{
/* unary op whose expansion depends on proc option bit 5
   (presumably overflow/range checking — confirm against Toption) */
{
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386));
}
LA12: ;
}
break;
case ((Tmagic292524) 26):
case ((Tmagic292524) 27):
{
/* inc/dec-style binary ops: plain statement when checks are off or the
   underlying kind allows it; otherwise checked arithmetic with a cast
   back into the ranged type (template T839829468_370) */
Ttype292840* underlying0;
underlying0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = !((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0));
if (LOC20) goto LA21;
LOC20 = ((IL64(34084860461056) &((NU64)1<<((NU)((*underlying0).kind)&63U)))!=0);
LA21: ;
if (!LOC20) goto LA22;
binarystmt_550501_839829468(p0, e0, d0, opr_557050_839829468[(op0)- 26]);
}
goto LA18;
LA22: ;
{
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ranged0;
Ropeobj178006* res0;
NimStringDesc* LOC25;
TY532811 LOC31;
Ropeobj178006* LOC32;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
ranged0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656);
LOC25 = (NimStringDesc*)0;
{
/* 64-bit underlying kind (35) selects the 64-bit helper table */
if (!((*underlying0).kind == ((Ttypekind292244) 35))) goto LA28;
LOC25 = copyString(fun64_557055_839829468[(op0)- 26]);
}
goto LA26;
LA28: ;
{
LOC25 = copyString(fun_557060_839829468[(op0)- 26]);
}
LA26: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, ranged0, (&a0), (&b0), LOC25);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = gettypedesc_535673_839829468((*p0).module, ranged0);
LOC31[1] = res0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2);
putintodest_550468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc292812) 0));
}
LA18: ;
}
break;
case ((Tmagic292524) 138):
{
genstrconcat_554452_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 144):
{
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394));
}
break;
case ((Tmagic292524) 145):
{
genstrappend_554554_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 146):
{
genseqelemappend_554683_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 128):
{
genstrequals_556667_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 129):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402));
}
break;
case ((Tmagic292524) 130):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403));
}
break;
case ((Tmagic292524) 157):
{
genisnil_552620_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 120):
{
/* `$` stringify variants, distinguished only by format template */
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406));
}
break;
case ((Tmagic292524) 121):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407));
}
break;
case ((Tmagic292524) 119):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408));
}
break;
case ((Tmagic292524) 118):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409));
}
break;
case ((Tmagic292524) 122):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410));
}
break;
case ((Tmagic292524) 123):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411));
}
break;
case ((Tmagic292524) 124):
{
/* identity conversion: just evaluate the operand into d0 */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Tmagic292524) 125):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 12):
{
genof_555331_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 29):
{
gennew_554782_839829468(p0, e0);
}
break;
case ((Tmagic292524) 30):
{
gennewfinalize_555111_839829468(p0, e0);
}
break;
case ((Tmagic292524) 31):
{
gennewseq_554824_839829468(p0, e0);
}
break;
case ((Tmagic292524) 32):
{
gennewseqofcap_554836_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 9):
{
/* sizeof-style magic: emits template T839829468_428 with the C type
   descriptor of the operand's type */
Ttype292840* t0;
TY178507 LOC55;
Ropeobj178006* LOC56;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256);
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC56 = (Ropeobj178006*)0;
LOC56 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc292812) 0));
}
break;
case ((Tmagic292524) 42):
{
gensomecast_556481_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 28):
{
genord_556475_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 35):
case ((Tmagic292524) 8):
case ((Tmagic292524) 34):
case ((Tmagic292524) 36):
case ((Tmagic292524) 33):
{
genarraylen_555415_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 37):
case ((Tmagic292524) 38):
{
/* template choice depends on compile command (gcmd == 2) or a flag on
   the owning module's module symbol */
{
NIM_BOOL LOC63;
LOC63 = (NIM_BOOL)0;
LOC63 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC63) goto LA64;
LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA64: ;
if (!!(LOC63)) goto LA65;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440));
}
goto LA61;
LA65: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441));
}
LA61: ;
}
break;
case ((Tmagic292524) 43):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443));
}
break;
case ((Tmagic292524) 44):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444));
}
break;
case ((Tmagic292524) 151):
{
gensetlengthstr_555632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 152):
{
gensetlengthseq_555500_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 39):
case ((Tmagic292524) 40):
case ((Tmagic292524) 41):
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
case ((Tmagic292524) 131):
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 148):
{
gensetop_556419_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 161):
case ((Tmagic292524) 162):
case ((Tmagic292524) 159):
case ((Tmagic292524) 160):
case ((Tmagic292524) 150):
case ((Tmagic292524) 163):
{
/* runtime-procedure magics: ensure the callee symbol is emitted
   (cgsym) when its loc lacks flag bit 3, then generate a normal call */
Tsym292834* opr0;
opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NimStringDesc* LOC78;
Ropeobj178006* LOC79;
if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0))) goto LA76;
LOC78 = (NimStringDesc*)0;
LOC78 = HEX24_178856_2381377266((*opr0).loc.r);
LOC79 = (Ropeobj178006*)0;
LOC79 = cgsym_532403_839829468((*p0).module, LOC78);
}
LA76: ;
gencall_543632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 164):
{
genreset_554731_839829468(p0, e0);
}
break;
case ((Tmagic292524) 17):
{
/* echo: unwrap arg 1 through conversions first */
Tnode292802* LOC82;
Tnode292802* LOC83;
LOC82 = (Tnode292802*)0;
LOC82 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC83 = (Tnode292802*)0;
LOC83 = skipconv_328882_3876443242(LOC82);
genecho_554369_839829468(p0, LOC83);
}
break;
case ((Tmagic292524) 158):
{
genarrtoseq_555046_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 223) ... ((Tmagic292524) 257):
case ((Tmagic292524) 19) ... ((Tmagic292524) 24):
{
/* magics illegal in this context: report localized error 229 with the
   callee's name */
localerror_196080_155036129((*e0).info, ((Tmsgkind191002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
}
break;
case ((Tmagic292524) 208):
{
/* spawn: wrap the proc for threaded execution, then generate it */
Tnode292802* n0;
n0 = wrapprocforspawn_435501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 155):
{
Tnode292802* n0;
n0 = liftparallel_478822_1773027539((*(*p0).module).module, e0);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 209):
{
/* deepCopy(x, y): unwrap one level if arg 1 is node kind 63/64,
   evaluate both sides, delegate to gendeepcopy */
Tloc292816 a0;
Tloc292816 b0;
Tnode292802* x0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
Tnode292802* LOC91;
Tnode292802* LOC94;
LOC91 = (Tnode292802*)0;
LOC91 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
if (!((*LOC91).kind == ((Tnodekind292020) 63) || (*LOC91).kind == ((Tnodekind292020) 64))) goto LA92;
LOC94 = (Tnode292802*)0;
LOC94 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
x0 = HEX5BHEX5D_293238_850551059(LOC94, ((NI) 0));
}
goto LA89;
LA92: ;
{
x0 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
}
LA89: ;
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
gendeepcopy_550374_839829468(p0, (&a0), (&b0));
}
break;
case ((Tmagic292524) 140):
case ((Tmagic292524) 94):
{
gencall_543632_839829468(p0, e0, d0);
}
break;
default:
{
/* unknown magic: internal error "<prefix> <enum name>" at e0's source
   location */
NimStringDesc* LOC98;
LOC98 = (NimStringDesc*)0;
LOC98 = rawNewString(reprEnum((NI)op0, (&NTI292524))->Sup.len + 14);
appendString(LOC98, ((NimStringDesc*) &T839829468_523));
appendString(LOC98, reprEnum((NI)op0, (&NTI292524)));
internalerror_196100_155036129((*e0).info, LOC98);
}
break;
}
}
/* Generates the C data for a constant set node `n0` and returns its rope.
   Sets larger than 8 bytes are emitted once as a named module-level data
   constant (cached in the module's datacache keyed by a fresh label id;
   template T839829468_524) and referenced by name; small sets are emitted
   inline via genrawsetdata. */
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tbitset339004* cs0;
NI size0;
NI64 LOC1;
result0 = (Ropeobj178006*)0;
cs0 = (Tbitset339004*)0;
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242((*n0).typ);
size0 = ((NI) (LOC1));
tobitset_340001_452470228(n0, (&cs0));
{
NI id0;
Ropeobj178006* LOC6;
if (!(((NI) 8) < size0)) goto LA4;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC6);
{
TY535238 LOC11;
/* first time this node is seen: actually emit the data definition */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC11[1] = result0;
LOC11[2] = genrawsetdata_549629_839829468(cs0, size0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3);
}
LA9: ;
}
goto LA2;
LA4: ;
{
result0 = genrawsetdata_549629_839829468(cs0, size0);
}
LA2: ;
return result0;
}
/* Generates code for a set constructor `e0` into `d0`.
   - Constant path: when node flag bit 4 is set, emit the precomputed set
     data via gensetnode and put it into the destination.
   - Big sets (> 8 bytes): memset the destination (T839829468_525), then for
     each element either a range-inclusion loop (node kind 44 = range;
     T839829468_526) or a single inclusion (T839829468_527).
   - Small sets (<= 8 bytes): zero the destination (T839829468_494), then
     build per-element bit-or format strings inline, parameterized by the
     set's bit width `ts0` ("(NU<bits>)..."), again with a range loop or a
     single inclusion. */
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 idx0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&idx0), 0, sizeof(idx0));
{
Ropeobj178006* LOC5;
if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = gensetnode_549664_839829468(p0, e0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, (*e0).typ, d0, NIM_FALSE);
}
LA9: ;
{
NI64 LOC13;
TY178507 LOC16;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242((*e0).typ);
if (!(IL64(8) < LOC13)) goto LA14;
/* big set: zero with memset, needs <string.h> */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1);
{
NI i_557537_839829468;
NI HEX3Atmp_557603_839829468;
NI LOC18;
NI res_557606_839829468;
i_557537_839829468 = (NI)0;
HEX3Atmp_557603_839829468 = (NI)0;
LOC18 = (NI)0;
LOC18 = sonslen_295351_850551059(e0);
HEX3Atmp_557603_839829468 = (NI)(LOC18 - ((NI) 1));
res_557606_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557606_839829468 <= HEX3Atmp_557603_839829468)) goto LA20;
i_557537_839829468 = res_557606_839829468;
{
/* range element a..b: loop idx from a to b including each value */
Ttype292840* LOC25;
TY535235 LOC26;
if (!((*(*e0).kindU.S6.sons->data[i_557537_839829468]).kind == ((Tnodekind292020) 44))) goto LA23;
LOC25 = (Ttype292840*)0;
LOC25 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC25, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468((&idx0));
LOC26[1] = rdloc_538188_839829468((&(*d0)));
LOC26[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
LOC26[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4);
}
goto LA21;
LA23: ;
{
TY532811 LOC28;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557537_839829468], (&a0));
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = rdloc_538188_839829468((&(*d0)));
LOC28[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2);
}
LA21: ;
res_557606_839829468 += ((NI) 1);
} LA20: ;
}
}
}
goto LA11;
LA14: ;
{
/* small set: ts0 = "NU" & bit count, spliced into the emitted casts */
NimStringDesc* ts0;
NimStringDesc* LOC30;
NI64 LOC31;
NimStringDesc* LOC32;
TY178507 LOC33;
LOC30 = (NimStringDesc*)0;
LOC31 = (NI64)0;
LOC31 = getsize_320135_3876443242((*e0).typ);
LOC32 = (NimStringDesc*)0;
LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8)));
LOC30 = rawNewString(LOC32->Sup.len + 2);
appendString(LOC30, ((NimStringDesc*) &T839829468_45));
appendString(LOC30, LOC32);
ts0 = LOC30;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468((&(*d0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1);
{
NI i_557575_839829468;
NI HEX3Atmp_557611_839829468;
NI LOC35;
NI res_557614_839829468;
i_557575_839829468 = (NI)0;
HEX3Atmp_557611_839829468 = (NI)0;
LOC35 = (NI)0;
LOC35 = sonslen_295351_850551059(e0);
HEX3Atmp_557611_839829468 = (NI)(LOC35 - ((NI) 1));
res_557614_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557614_839829468 <= HEX3Atmp_557611_839829468)) goto LA37;
i_557575_839829468 = res_557614_839829468;
{
/* range element: build the loop format string from ts0 fragments */
Ttype292840* LOC42;
NimStringDesc* LOC43;
TY535235 LOC44;
if (!((*(*e0).kindU.S6.sons->data[i_557575_839829468]).kind == ((Tnodekind292020) 44))) goto LA40;
LOC42 = (Ttype292840*)0;
LOC42 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC42, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
LOC43 = (NimStringDesc*)0;
LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68);
appendString(LOC43, ((NimStringDesc*) &T839829468_528));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_529));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468((&idx0));
LOC44[1] = rdloc_538188_839829468((&(*d0)));
LOC44[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
LOC44[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC43, LOC44, 4);
}
goto LA38;
LA40: ;
{
NimStringDesc* LOC46;
TY532811 LOC47;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557575_839829468], (&a0));
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36);
appendString(LOC46, ((NimStringDesc*) &T839829468_530));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_531));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = rdloc_538188_839829468((&(*d0)));
LOC47[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC46, LOC47, 2);
}
LA38: ;
res_557614_839829468 += ((NI) 1);
} LA37: ;
}
}
}
LA11: ;
}
LA1: ;
}
/* Generates a complex (aggregate) constant expression `n0` into `d0`: emits
   the constant once into the module's data section (template T839829468_272,
   cached in datacache by label id), then either fills an empty destination
   loc with the named constant or copies the data into the existing
   destination, marking the loc's storage when the type kind allows. */
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI id0;
Ropeobj178006* tmp0;
Ropeobj178006* LOC2;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC1 = (Ropeobj178006*)0;
LOC1 = gettypedesc_535673_839829468((*p0).module, t0);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC2 = (Ropeobj178006*)0;
LOC2 = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC2);
{
TY535238 LOC7;
/* first occurrence: emit the data definition */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3);
}
LA5: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, tmp0, ((Tstorageloc292812) 1));
}
goto LA8;
LA10: ;
{
putdataintodest_550436_839829468(p0, d0, t0, tmp0);
{
if (!!(((285212672 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0))) goto LA15;
(*d0).s = ((Tstorageloc292812) 1);
}
LA15: ;
}
LA8: ;
}
/* Fast path for constructor expressions that are deep compile-time
   constants: when the destination loc is empty, the node is non-trivial
   (son count above the kind-38 threshold) and isdeepconstexpr holds, emits
   the value once into the module data section (template T839829468_272,
   cached by label id), points `d0` at the named constant, and returns true.
   Returns false otherwise so the caller generates element-wise code. */
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
NI LOC6;
Ttype292840* t0;
Ropeobj178006* LOC10;
NI id0;
Ropeobj178006* LOC11;
Ropeobj178006* LOC12;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC4)) goto LA5;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(n0);
LOC4 = (((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < LOC6);
LA5: ;
LOC3 = LOC4;
if (!(LOC3)) goto LA7;
LOC3 = isdeepconstexpr_318566_2616423590(n0);
LA7: ;
if (!LOC3) goto LA8;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypedesc_535673_839829468((*p0).module, t0);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC11 = (Ropeobj178006*)0;
LOC11 = rope_178401_2381377266(((NI64) (id0)));
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC11);
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, LOC12, ((Tstorageloc292812) 1));
{
TY535238 LOC17;
/* first occurrence: emit the data definition */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC17[1] = (*d0).r;
LOC17[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
result0 = NIM_TRUE;
}
goto LA1;
LA8: ;
{
result0 = NIM_FALSE;
}
LA1: ;
return result0;
}
/* Generates code for an array constructor `n0` into `d0`. Tries the
   constant-data fast path (handleconstexpr) first; otherwise allocates a
   temp if the destination is empty and generates each element expression
   into the indexed slot d[i] (array-index template T839829468_138). */
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 arr0;
memset((void*)(&arr0), 0, sizeof(arr0));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA8: ;
{
NI i_558234_839829468;
NI HEX3Atmp_558242_839829468;
NI LOC11;
NI res_558245_839829468;
i_558234_839829468 = (NI)0;
HEX3Atmp_558242_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = sonslen_295351_850551059(n0);
HEX3Atmp_558242_839829468 = (NI)(LOC11 - ((NI) 1));
res_558245_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
if (!(res_558245_839829468 <= HEX3Atmp_558242_839829468)) goto LA13;
i_558234_839829468 = res_558245_839829468;
/* element loc: d[i] with the array's element type */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC15, (*d0).s);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_558234_839829468)));
arr0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2);
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[i_558234_839829468], (&arr0));
res_558245_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA4: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genTupleConstr`-style proc: emits C code for a tuple-constructor node
 * `n0` into destination loc `d0`, field by field -- NOTE(review): inferred
 * from the mangled name; confirm against the original Nim source. */
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 rec0;
memset((void*)(&rec0), 0, sizeof(rec0));
{
NIM_BOOL LOC3;
Ttype292840* t0;
Ropeobj178006* LOC6;
LOC3 = (NIM_BOOL)0;
/* Skip per-field codegen if the whole tuple was emitted as a constant. */
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC6 = (Ropeobj178006*)0;
/* Result discarded; presumably called for its side effect of ensuring the
 * tuple's C type is declared -- TODO confirm. */
LOC6 = gettypedesc_535673_839829468((*p0).module, t0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, t0, d0, NIM_FALSE);
}
LA9: ;
{
/* For each son, build `rec0` addressing the destination's i-th field
 * and generate the field's initializer expression into it. */
NI i_557646_839829468;
NI HEX3Atmp_557803_839829468;
NI LOC12;
NI res_557806_839829468;
i_557646_839829468 = (NI)0;
HEX3Atmp_557803_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = sonslen_295351_850551059(n0);
HEX3Atmp_557803_839829468 = (NI)(LOC12 - ((NI) 1));
res_557806_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
TY532811 LOC19;
if (!(res_557806_839829468 <= HEX3Atmp_557803_839829468)) goto LA14;
i_557646_839829468 = res_557806_839829468;
it0 = (*n0).kindU.S6.sons->data[i_557646_839829468];
{
/* Node kind 34: presumably a key/value pair; unwrap to the value. */
if (!((*it0).kind == ((Tnodekind292020) 34))) goto LA17;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA17: ;
initloc_532273_839829468((&rec0), ((Tlockind292808) 6), (*it0).typ, (*d0).s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&(*d0)));
LOC19[1] = rope_178401_2381377266(((NI64) (i_557646_839829468)));
rec0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2);
expr_539248_839829468(p0, it0, (&rec0));
res_557806_839829468 += ((NI) 1);
} LA14: ;
}
}
}
LA4: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `lookupFieldAgain`-style proc: walks an object type and its base types
 * (sons[0]) until `field0` is found in the record, appending an access
 * string to `*r0` for each inheritance level skipped on non-C++ targets --
 * NOTE(review): inferred; confirm against the original Nim source.
 * Raises an internal error if the field is never found. */
N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0) {
Tsym292834* result0;
Ttype292840* ty0;
result0 = (Tsym292834*)0;
ty0 = ty_553157_839829468;
{
while (1) {
if (!!((ty0 == NIM_NIL))) goto LA2;
ty0 = skiptypes_296099_850551059(ty0, IL64(211106247215360));
result0 = lookupinrecord_299119_2984716966((*ty0).n, (*field0).name);
{
if (!!((result0 == NIM_NIL))) goto LA5;
goto LA1;
}
LA5: ;
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* Condition looks like "compiling to C++ or module flag 27 set";
 * when NOT in that mode, append the base-field accessor text. */
LOC9 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC9) goto LA10;
LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA10: ;
if (!!(LOC9)) goto LA11;
add_178487_2381377266(r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
/* Ascend to the base type and retry. */
ty0 = getuniquetype_528640_2036603609((*ty0).sons->data[((NI) 0)]);
} LA2: ;
} LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA15;
internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532));
}
LA15: ;
return result0;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genFieldCheck`-style proc: for each check node in `e0` (from son 1
 * onward), emits a runtime discriminant test guarding access to `field0`
 * of a case object, caching the field-name string literal in the module's
 * datacache -- NOTE(review): inferred from mangled names; confirm against
 * the original Nim source. */
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0) {
Tloc292816 test0;
Tloc292816 u0;
Tloc292816 v0;
memset((void*)(&test0), 0, sizeof(test0));
memset((void*)(&u0), 0, sizeof(u0));
memset((void*)(&v0), 0, sizeof(v0));
{
NI i_553525_839829468;
NI HEX3Atmp_554039_839829468;
NI LOC2;
NI res_554042_839829468;
i_553525_839829468 = (NI)0;
HEX3Atmp_554039_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554039_839829468 = (NI)(LOC2 - ((NI) 1));
/* Iteration starts at son 1 (son 0 is presumably not a check node). */
res_554042_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tsym292834* op0;
Tnode292802* disc0;
Ropeobj178006* o0;
Tsym292834* d0;
NI id0;
Tnode292802* LOC9;
Ropeobj178006* strlit0;
if (!(res_554042_839829468 <= HEX3Atmp_554039_839829468)) goto LA4;
i_553525_839829468 = res_554042_839829468;
it0 = (*e0).kindU.S6.sons->data[i_553525_839829468];
op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* Magic 99: looks like a "not" wrapper around the check -- unwrap it
 * here and remember via op0 which message format to emit below. */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA7;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA7: ;
disc0 = skipconv_328882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]);
initloc_532273_839829468((&test0), ((Tlockind292808) 0), (*it0).typ, ((Tstorageloc292812) 2));
initlocexpr_539283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0));
o0 = obj0;
/* Resolve the discriminator field against the object hierarchy; o0 picks
 * up any base-type accessor prefixes. */
d0 = lookupfieldagain_553154_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0);
initloc_532273_839829468((&v0), ((Tlockind292808) 6), (*d0).typ, ((Tstorageloc292812) 0));
v0.r = o0;
add_178487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&v0.r, (*d0).loc.r);
geninexpraux_553496_839829468(p0, it0, (&u0), (&v0), (&test0));
LOC9 = (Tnode292802*)0;
LOC9 = newstrnode_293677_850551059(((Tnodekind292020) 20), (*(*field0).name).s);
/* Deduplicate the field-name string literal via the module datacache:
 * if id0 equals the current label counter, the literal is new. */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels)));
{
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12;
strlit0 = getstrlit_549468_839829468((*p0).module, (*(*field0).name).s);
}
goto LA10;
LA12: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = rope_178401_2381377266(((NI64) (id0)));
strlit0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC15);
}
LA10: ;
{
TY532811 LOC20;
/* Two emit variants depending on whether the check was negated. */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA18;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468((&test0));
LOC20[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2);
}
goto LA16;
LA18: ;
{
TY532811 LOC22;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = rdloc_538188_839829468((&test0));
LOC22[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2);
}
LA16: ;
res_554042_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genObjConstr`-style proc: emits C code for an object-constructor node
 * `e0` into `d0`. Handles both value objects and ref objects (isref0),
 * constructs into a temporary, then assigns the temporary to the real
 * destination -- NOTE(review): inferred; confirm against original source. */
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 tmp0;
Ttype292840* t0;
NIM_BOOL isref0;
Ropeobj178006* r0;
Ropeobj178006* LOC13;
Ttype292840* ty0;
{ {
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* Constant-foldable constructor: emitted directly, nothing else to do. */
LOC3 = handleconstexpr_554853_839829468(p0, e0, d0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
memset((void*)(&tmp0), 0, sizeof(tmp0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106232576256));
gettemp_537032_839829468(p0, t0, (&tmp0), NIM_FALSE);
/* Type kind 22 appears to be the ref kind. */
isref0 = ((*t0).kind == ((Ttypekind292244) 22));
r0 = rdloc_538188_839829468((&tmp0));
{
Ttype292840* LOC10;
TY178507 LOC11;
if (!isref0) goto LA8;
/* ref object: heap-allocate, then dereference the temp for field writes
 * and switch t0 to the underlying object type. */
rawgennew_554741_839829468(p0, (&tmp0), NIM_NIL);
LOC10 = (Ttype292840*)0;
LOC10 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC10, IL64(211106232576256));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1);
gcusage_554439_839829468(e0);
}
goto LA6;
LA8: ;
{
/* value object: default-initialize the temporary in place. */
constructloc_538388_839829468(p0, (&tmp0), NIM_FALSE);
}
LA6: ;
LOC13 = (Ropeobj178006*)0;
LOC13 = gettypedesc_535673_839829468((*p0).module, t0);
ty0 = getuniquetype_528640_2036603609(t0);
{
/* For each field-assignment son (starting at 1), resolve the field,
 * optionally emit a discriminant check, then generate the value. */
NI i_554944_839829468;
NI HEX3Atmp_554997_839829468;
NI LOC15;
NI res_555000_839829468;
i_554944_839829468 = (NI)0;
HEX3Atmp_554997_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = len_293081_850551059(e0);
HEX3Atmp_554997_839829468 = (LOC15 - 1);
res_555000_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tloc292816 tmp20;
Tsym292834* field0;
if (!(res_555000_839829468 <= HEX3Atmp_554997_839829468)) goto LA17;
i_554944_839829468 = res_555000_839829468;
it0 = (*e0).kindU.S6.sons->data[i_554944_839829468];
memset((void*)(&tmp20), 0, sizeof(tmp20));
tmp20.r = r0;
field0 = lookupfieldagain_553154_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA20;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533));
}
LA20: ;
{
NIM_BOOL LOC24;
NI LOC25;
LOC24 = (NIM_BOOL)0;
LOC25 = (NI)0;
LOC25 = len_293081_850551059(it0);
LOC24 = (LOC25 == ((NI) 3));
if (!(LOC24)) goto LA26;
/* Option bit 2 presumably = field checks enabled -- TODO confirm. */
LOC24 = (((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0);
LA26: ;
if (!LOC24) goto LA27;
genfieldcheck_553504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0);
}
LA27: ;
add_178487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&tmp20.r, (*field0).loc.r);
tmp20.k = ((Tlockind292808) 1);
tmp20.t = (*field0).loc.t;
{
if (!isref0) goto LA31;
tmp20.s = ((Tstorageloc292812) 3);
}
goto LA29;
LA31: ;
{
tmp20.s = ((Tstorageloc292812) 2);
}
LA29: ;
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20));
res_555000_839829468 += ((NI) 1);
} LA17: ;
}
}
{
/* No destination yet: hand over the temporary loc wholesale. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA34;
LA36: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA34: ;
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genCast`-style proc: for certain type kinds (bitset membership tests on
 * destt0/srct0 below) it emits a labelled union temporary and reads the
 * cast result through it (type punning via union); otherwise it falls back
 * to gensomecast -- NOTE(review): inferred; confirm against original. */
N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* destt0;
Ttype292840* srct0;
destt0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
srct0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
Ropeobj178006* lbl0;
Tloc292816 tmp0;
TY178507 LOC7;
TY535238 LOC8;
TY178507 LOC9;
Ropeobj178006* LOC10;
LOC3 = (NIM_BOOL)0;
/* Bitset constant 1030792609808 selects the "unsafe" type kinds that need
 * the union trick -- exact member kinds not derivable from this view. */
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*destt0).kind)&63U)))!=0);
if (LOC3) goto LA4;
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*srct0).kind)&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* Fresh label number names the emitted union temporary. */
(*p0).labels += ((NI) 1);
lbl0 = rope_178401_2381377266(((NI64) ((*p0).labels)));
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = lbl0;
tmp0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535673_839829468((*p0).module, srct0);
LOC8[1] = gettypedesc_535673_839829468((*p0).module, destt0);
LOC8[2] = lbl0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3);
tmp0.k = ((Tlockind292808) 6);
tmp0.t = srct0;
tmp0.s = ((Tstorageloc292812) 2);
tmp0.flags = 0;
/* Write the source expression into the union's source member... */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = lbl0;
LOC10 = (Ropeobj178006*)0;
/* ...and read the result back through the destination member. */
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s);
}
goto LA1;
LA5: ;
{
gensomecast_556481_839829468(p0, e0, d0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genConv`-style proc: if the destination type compares equal to the
 * operand's type (ignoring distinct-ness, per comparetypes args), the
 * conversion is a no-op and only the operand is generated; otherwise
 * gensomecast handles it -- NOTE(review): inferred; confirm. */
N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* desttype0;
desttype0 = skiptypes_296099_850551059((*e0).typ, 8390656);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = comparetypes_326214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare324427) 1), 0);
if (!LOC3) goto LA4;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
goto LA1;
LA4: ;
{
gensomecast_556481_839829468(p0, e0, d0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to an
 * `isCppRef`-style predicate: true when compiling in C++ mode (command 2
 * or module flag 27) AND typ0 skips to type kind 23 (presumably `var`)
 * AND type flag 18 is NOT set -- NOTE(review): the meanings of command 2,
 * flag 27, kind 23 and flag 18 are inferred from context; confirm against
 * the original Nim enum definitions. */
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC2;
NIM_BOOL LOC3;
Ttype292840* LOC6;
Ttype292840* LOC8;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC2 = (NIM_BOOL)0;
LOC3 = (NIM_BOOL)0;
/* Short-circuit `and` chain flattened into gotos by the code generator. */
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
LOC2 = LOC3;
if (!(LOC2)) goto LA5;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC2 = ((*LOC6).kind == ((Ttypekind292244) 23));
LA5: ;
LOC1 = LOC2;
if (!(LOC1)) goto LA7;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA7: ;
result0 = LOC1;
return result0;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genAddr`-style proc: emits C for an address-of node. Three branches:
 * (1) operand type in a special kind-set -> wrap with a format string
 * (T839829468_52) instead of plain `&`; (2) operand maps to ctype kind 17
 * or is a C++ reference -> address-of is a no-op, just generate operand;
 * (3) otherwise take addrloc of the operand -- NOTE(review): inferred. */
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Ttype292840* LOC3;
Tloc292816 a0;
Ropeobj178006* LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((6291456 &((NU64)1<<((NU)((*LOC3).kind)&63U)))!=0)) goto LA4;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), a0.r);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC8;
Tctypekind529007 LOC9;
LOC8 = (NIM_BOOL)0;
LOC9 = (Tctypekind529007)0;
LOC9 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LOC8 = (LOC9 == ((Tctypekind529007) 17));
if (LOC8) goto LA10;
LOC8 = iscppref_552807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LA10: ;
if (!LOC8) goto LA11;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA11: ;
{
Tloc292816 a0;
Ropeobj178006* LOC14;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC14 = (Ropeobj178006*)0;
LOC14 = addrloc_538204_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, a0.s);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genArrayElem`-style proc: emits C for `x0[y0]` on fixed-size arrays.
 * When bounds checking is on (option bit 4) and the array type isn't
 * flagged unchecked (type flag 0), emits a runtime range check (two
 * variants depending on whether firstOrd == 0); for constant indices the
 * check is done at compile time and reported via localerror. Finally emits
 * the element access with the first-ordinal offset subtracted --
 * NOTE(review): inferred from mangled names; confirm. */
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC1;
Ropeobj178006* first0;
NI64 LOC2;
Ttype292840* LOC47;
Ttype292840* LOC48;
TY535238 LOC49;
Ropeobj178006* LOC50;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
ty0 = skiptypes_296099_850551059(LOC1, IL64(211106247256320));
LOC2 = (NI64)0;
LOC2 = firstord_320001_3876443242(ty0);
/* first0 = literal of the array's first ordinal (index base offset). */
first0 = intliteral_539270_839829468(LOC2);
{
NIM_BOOL LOC5;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0));
LA6: ;
if (!LOC5) goto LA7;
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = isconstexpr_318510_2616423590(y0);
if (!!(LOC11)) goto LA12;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = firstord_320001_3876443242(ty0);
if (!(LOC16 == IL64(0))) goto LA17;
{
/* Zero-based array: only emit an upper-bound check, and only when the
 * index type's range is not already contained in the array's range. */
NIM_BOOL LOC21;
NI64 LOC22;
NI64 LOC23;
NI64 LOC25;
NI64 LOC26;
TY532811 LOC29;
NI64 LOC30;
LOC21 = (NIM_BOOL)0;
LOC22 = (NI64)0;
LOC22 = firstord_320001_3876443242(b0.t);
LOC23 = (NI64)0;
LOC23 = firstord_320001_3876443242(ty0);
LOC21 = (LOC22 < LOC23);
if (LOC21) goto LA24;
LOC25 = (NI64)0;
LOC25 = lastord_320004_3876443242(ty0);
LOC26 = (NI64)0;
LOC26 = lastord_320004_3876443242(b0.t);
LOC21 = (LOC25 < LOC26);
LA24: ;
if (!LOC21) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdcharloc_538227_839829468((&b0));
LOC30 = (NI64)0;
LOC30 = lastord_320004_3876443242(ty0);
LOC29[1] = intliteral_539270_839829468(LOC30);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2);
}
LA27: ;
}
goto LA14;
LA17: ;
{
/* Non-zero-based array: emit a full lower+upper bound check. */
TY535238 LOC32;
NI64 LOC33;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rdcharloc_538227_839829468((&b0));
LOC32[1] = first0;
LOC33 = (NI64)0;
LOC33 = lastord_320004_3876443242(ty0);
LOC32[2] = intliteral_539270_839829468(LOC33);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3);
}
LA14: ;
}
goto LA9;
LA12: ;
{
/* Constant index: validate at compile time instead of emitting a check. */
NI64 idx0;
idx0 = getordvalue_320129_3876443242(y0);
{
NIM_BOOL LOC37;
NI64 LOC38;
NI64 LOC40;
LOC37 = (NIM_BOOL)0;
LOC38 = (NI64)0;
LOC38 = firstord_320001_3876443242(ty0);
LOC37 = (idx0 < LOC38);
if (LOC37) goto LA39;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(ty0);
LOC37 = (LOC40 < idx0);
LA39: ;
if (!LOC37) goto LA41;
localerror_196080_155036129((*x0).info, ((Tmsgkind191002) 86), ((NimStringDesc*) &T839829468_490));
}
LA41: ;
}
LA9: ;
}
LA7: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA45;
(*d0).s = a0.s;
}
LA45: ;
LOC47 = (Ttype292840*)0;
LOC47 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC48 = (Ttype292840*)0;
LOC48 = elemtype_320394_3876443242(LOC47);
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&a0));
LOC49[1] = rdcharloc_538227_839829468((&b0));
LOC49[2] = first0;
LOC50 = (Ropeobj178006*)0;
LOC50 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3);
putintodest_550468_839829468(p0, d0, LOC48, LOC50, a0.s);
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genOpenArrayElem`-style proc: emits C for indexing an openarray-like
 * parameter, with an optional runtime bounds check when option bit 4
 * (presumably boundChecks) is set -- NOTE(review): inferred; confirm. */
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* LOC10;
Ttype292840* LOC11;
TY532811 LOC12;
Ropeobj178006* LOC13;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
{
TY532811 LOC5;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&b0));
LOC5[1] = rdloc_538188_839829468((&a0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2);
}
LA3: ;
{
/* Propagate the array's storage class when the destination is fresh. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
(*d0).s = a0.s;
}
LA8: ;
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC11 = (Ttype292840*)0;
LOC11 = elemtype_320394_3876443242(LOC10);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&a0));
LOC12[1] = rdcharloc_538227_839829468((&b0));
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2);
putintodest_550468_839829468(p0, d0, LOC11, LOC13, a0.s);
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genSeqElem`-style proc: emits C for indexing a seq or string. Emits one
 * of two bounds-check variants (type kind 28, presumably string, allows
 * index == len for the terminating element), dereferences through a ptr/ref
 * wrapper if present, then accesses ->data[i] -- NOTE(review): inferred. */
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC27;
Ttype292840* LOC28;
TY532811 LOC29;
Ropeobj178006* LOC30;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
/* Bitset 6291456 selects ptr/ref-like kinds: unwrap to the element
 * container type -- TODO confirm which kinds the mask covers. */
Ttype292840* LOC5;
if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
{
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA8;
{
TY535238 LOC14;
if (!((*ty0).kind == ((Ttypekind292244) 28))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&b0));
LOC14[1] = rdloc_538188_839829468((&a0));
LOC14[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3);
}
goto LA10;
LA12: ;
{
TY535238 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&b0));
LOC16[1] = rdloc_538188_839829468((&a0));
LOC16[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3);
}
LA10: ;
}
LA8: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA19;
(*d0).s = ((Tstorageloc292812) 3);
}
LA19: ;
{
/* If the loc's own type is still a ptr/ref wrapper, rewrite a0.r to the
 * dereferenced form before emitting the element access. */
Ttype292840* LOC23;
TY178507 LOC26;
LOC23 = (Ttype292840*)0;
LOC23 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
if (!((6291456 &((NU64)1<<((NU)((*LOC23).kind)&63U)))!=0)) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = a0.r;
a0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1);
}
LA24: ;
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC28 = (Ttype292840*)0;
LOC28 = elemtype_320394_3876443242(LOC27);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468((&a0));
LOC29[1] = rdcharloc_538227_839829468((&b0));
LOC30 = (Ropeobj178006*)0;
LOC30 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2);
putintodest_550468_839829468(p0, d0, LOC28, LOC30, a0.s);
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genCStringElem`-style proc: emits C for indexing a cstring -- note
 * that, unlike the array/seq variants above, no bounds check is emitted --
 * NOTE(review): inferred from the mangled name; confirm. */
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC5;
Ttype292840* LOC6;
TY532811 LOC7;
Ropeobj178006* LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC6 = (Ttype292840*)0;
LOC6 = elemtype_320394_3876443242(LOC5);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&a0));
LOC7[1] = rdcharloc_538227_839829468((&b0));
LOC8 = (Ropeobj178006*)0;
LOC8 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2);
putintodest_550468_839829468(p0, d0, LOC6, LOC8, a0.s);
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genTupleElem`-style proc: emits C for `tup[i]` where `i` must be an
 * integer literal (node kinds 6..15 look like the int-literal range);
 * anything else is an internal error -- NOTE(review): inferred; confirm. */
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
NI i0;
Ropeobj178006* LOC5;
Ttype292840* ty0;
Ropeobj178006* r0;
TY178507 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
i0 = (NI)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ropeobj178006*)0;
/* Result discarded; presumably forces the tuple's C type declaration. */
LOC5 = gettypedesc_535673_839829468((*p0).module, a0.t);
ty0 = getuniquetype_528640_2036603609(a0.t);
r0 = rdloc_538188_839829468((&a0));
switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) {
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
{
i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval));
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545));
}
break;
}
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) (i0)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s);
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genBracketExpr`-style dispatcher: inspects the indexed operand's type
 * kind and routes `a[i]` codegen to the array / openarray / seq-string /
 * cstring / tuple element generators above; any other kind is an internal
 * error -- NOTE(review): the specific enum values (16, 4, 27, 48, 24, 28,
 * 29, 18) are inferred mappings; confirm against the Ttypekind enum. */
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
{
/* Unwrap ptr/ref-like wrappers (bitset 6291456) first. */
Ttype292840* LOC5;
if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
switch ((*ty0).kind) {
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
genarrayelem_554093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
genopenarrayelem_554169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
genseqelem_554205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 29):
{
gencstringelem_554144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleelem_553124_839829468(p0, n0, d0);
}
break;
default:
{
NimStringDesc* LOC12;
LOC12 = (NimStringDesc*)0;
LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 21);
appendString(LOC12, ((NimStringDesc*) &T839829468_547));
appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI292244)));
appendChar(LOC12, 41);
internalerror_196100_155036129((*n0).info, LOC12);
}
break;
}
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genDeref`-style proc: emits C for dereferencing a ptr/ref/var value
 * `e0[0]`. If the operand maps to an array-like ctype (mask 393216) and
 * the deref is not enforced, dereferencing is a no-op. Otherwise it
 * evaluates the operand into `a0` and emits either a raw read (C++ `var`
 * special case), a direct read (enforced deref of ctype 18), or a `(*x)`
 * wrapper. The long goto chains encode short-circuit boolean conditions --
 * NOTE(review): all enum-value meanings inferred; confirm against source.
 * Statement order and label flow are load-bearing; do not reorder. */
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0) {
Tctypekind529007 mt0;
{ mt0 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* Array-like C mapping and not enforced: deref is the identity. */
LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(enforcederef0);
LA4: ;
if (!LOC3) goto LA5;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
{
Ttype292840* LOC9;
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC9).kind == ((Ttypekind292244) 22))) goto LA10;
(*d0).s = ((Tstorageloc292812) 3);
}
LA10: ;
}
goto LA1;
LA5: ;
{
Tloc292816 a0;
Ttype292840* typ0;
memset((void*)(&a0), 0, sizeof(a0));
typ0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* C++-mode `var` special case: skip two levels of node wrapping and
 * generate the inner expression directly into the destination. */
NIM_BOOL LOC15;
NIM_BOOL LOC16;
NIM_BOOL LOC17;
NIM_BOOL LOC20;
Tnode292802* LOC25;
Tnode292802* LOC26;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC17)) goto LA18;
LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA18: ;
LOC16 = LOC17;
if (!(LOC16)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
LOC16 = LOC20;
LA19: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA22;
LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 64));
LA22: ;
if (!LOC15) goto LA23;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
LOC26 = (Tnode292802*)0;
LOC26 = HEX5BHEX5D_293238_850551059(LOC25, ((NI) 0));
initlocexprsingleuse_539289_839829468(p0, LOC26, d0);
goto BeforeRet;
}
goto LA13;
LA23: ;
{
initlocexprsingleuse_539289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA13: ;
{
/* Fresh destination: derive its storage class from the pointer kind. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA30;
switch ((*typ0).kind) {
case ((Ttypekind292244) 22):
{
(*d0).s = ((Tstorageloc292812) 3);
}
break;
case ((Ttypekind292244) 23):
{
(*d0).s = ((Tstorageloc292812) 0);
{
NIM_BOOL LOC36;
NIM_BOOL LOC37;
NIM_BOOL LOC39;
Ropeobj178006* LOC44;
LOC36 = (NIM_BOOL)0;
LOC37 = (NIM_BOOL)0;
LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
if (!(LOC37)) goto LA38;
LOC39 = (NIM_BOOL)0;
LOC39 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC39) goto LA40;
LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA40: ;
LOC37 = LOC39;
LA38: ;
LOC36 = LOC37;
if (!(LOC36)) goto LA41;
LOC36 = ((*e0).kind == ((Tnodekind292020) 65));
LA41: ;
if (!LOC36) goto LA42;
LOC44 = (Ropeobj178006*)0;
LOC44 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, a0.s);
goto BeforeRet;
}
LA42: ;
}
break;
case ((Ttypekind292244) 21):
{
(*d0).s = ((Tstorageloc292812) 0);
}
break;
default:
{
NimStringDesc* LOC47;
LOC47 = (NimStringDesc*)0;
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 9);
appendString(LOC47, ((NimStringDesc*) &T839829468_548));
appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196100_155036129((*e0).info, LOC47);
}
break;
}
}
goto LA28;
LA30: ;
{
/* Destination already set: re-check the C++-mode `var` shortcut. */
NIM_BOOL LOC49;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
if (!LOC49) goto LA51;
{
NIM_BOOL LOC55;
NIM_BOOL LOC56;
Ropeobj178006* LOC61;
LOC55 = (NIM_BOOL)0;
LOC56 = (NIM_BOOL)0;
LOC56 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC56)) goto LA57;
LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA57: ;
LOC55 = LOC56;
if (!(LOC55)) goto LA58;
LOC55 = ((*e0).kind == ((Tnodekind292020) 65));
LA58: ;
if (!LOC55) goto LA59;
LOC61 = (Ropeobj178006*)0;
LOC61 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC61, a0.s);
goto BeforeRet;
}
LA59: ;
}
goto LA28;
LA51: ;
LA28: ;
{
NIM_BOOL LOC64;
Ropeobj178006* LOC68;
LOC64 = (NIM_BOOL)0;
LOC64 = enforcederef0;
if (!(LOC64)) goto LA65;
LOC64 = (mt0 == ((Tctypekind529007) 18));
LA65: ;
if (!LOC64) goto LA66;
LOC68 = (Ropeobj178006*)0;
LOC68 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s);
}
goto LA62;
LA66: ;
{
/* General case: wrap the operand in the `(*x)` format (T839829468_124). */
TY178507 LOC70;
Ropeobj178006* LOC71;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rdloc_538188_839829468((&a0));
LOC71 = (Ropeobj178006*)0;
LOC71 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC71, a0.s);
}
LA62: ;
}
LA1: ;
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genRecordFieldAux`-style helper: evaluates the record operand of a
 * field-access node into `a0`, validates that son 1 is a symbol node
 * (kind 3), propagates the storage class into a fresh destination, and
 * returns the record's unique type -- NOTE(review): inferred; confirm. */
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0) {
Ttype292840* result0;
Ropeobj178006* LOC9;
result0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0);
{
if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 3)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549));
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
(*d0).s = (*a0).s;
}
LA7: ;
LOC9 = (Ropeobj178006*)0;
/* Result discarded; presumably forces the record's C type declaration. */
LOC9 = gettypedesc_535673_839829468((*p0).module, (*a0).t);
result0 = getuniquetype_528640_2036603609((*a0).t);
return result0;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genRecordField`-style proc: emits C for `obj.field`. Tuples (type kind
 * 18) are accessed positionally; objects resolve the field through the
 * inheritance chain via lookupfieldagain -- NOTE(review): inferred. */
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, e0, d0, (&a0));
r0 = rdloc_538188_839829468((&a0));
f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
{
TY178507 LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 18))) goto LA3;
/* Tuple: access by field position. */
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*f0).position)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*f0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
Tsym292834* field0;
TY178507 LOC11;
field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA9;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550));
}
LA9: ;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*field0).loc.r;
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend). Appears to correspond to a
 * `genCheckedRecordField`-style proc: like genrecordfield, but when field
 * checks are enabled (option bit 2) it first emits runtime discriminant
 * checks via genfieldcheck; with checks disabled it degrades to a plain
 * field access -- NOTE(review): inferred; confirm against source. */
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
Tsym292834* field0;
TY178507 LOC9;
Ropeobj178006* LOC10;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
/* Note: the field-access node is son 0 of e0; the remaining sons are the
 * check conditions consumed by genfieldcheck below. */
ty0 = genrecordfieldaux_553096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0));
r0 = rdloc_538188_839829468((&a0));
f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA7;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532));
}
LA7: ;
genfieldcheck_553504_839829468(p0, e0, r0, field0, ty0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = (*field0).loc.r;
LOC10 = (Ropeobj178006*)0;
LOC10 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1);
add_178482_2381377266(&r0, LOC10);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
genrecordfield_553448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
LA1: ;
}
/* Opens a new codegen block: emits the block-start line into proc section 2,
 * bumps the label counter, grows p0->blocks by one, and records the new
 * block's id and the current try/except nesting depths in the new slot.
 * Returns the index of the newly appended block. */
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0) {
NI result0;
result0 = (NI)0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), start0, args0, args0Len0);
(*p0).labels += ((NI) 1);
/* Index of the slot about to be appended (seq length before resize). */
result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) ((NI)(result0 + ((NI) 1)))));
(*p0).blocks->data[result0].id = ((NI) ((*p0).labels));
(*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0)));
(*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock));
return result0;
}
/* Assembles the full text (rope) of one codegen block: section 0, then an
 * extra formatted line when framelen > 0 (presumably a stack-frame length
 * marker -- TODO confirm), then sections 1 and 2, concatenated in order. */
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*b0).sections[(((Tcprocsection529011) 0))- 0];
{
TY178507 LOC5;
if (!(((NI16) 0) < (*b0).framelen)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*b0).framelen)));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1);
}
LA3: ;
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 1))- 0]);
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 2))- 0]);
return result0;
}
/* Closes the topmost codegen block: renders its body via blockbody, appends
 * it to the parent block's section 2, shrinks p0->blocks by one, and emits
 * the given block-end text (e.g. the closing brace line). */
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0) {
NI topblock0;
Ropeobj178006* LOC1;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
LOC1 = (Ropeobj178006*)0;
LOC1 = blockbody_544025_839829468((&(*p0).blocks->data[topblock0]));
/* Splice the finished block into its parent (topblock - 1). */
add_178482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection529011) 2))- 0], LOC1);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (topblock0)));
line_532690_839829468(p0, ((Tcprocsection529011) 2), blockend0);
}
/* Convenience overload of endblock: builds the block-end rope itself.
 * If the top block has a label, the end text is formatted with that label
 * (so the closing brace carries a goto target); otherwise a plain template
 * T839829468_160 is used. When framelen > 0, an extra framelen-dependent
 * fragment is appended before delegating to endblock_544035. */
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0) {
NI topblock0;
Ropeobj178006* blockend0;
NI16 framelen0;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
{
TY178507 LOC5;
/* Labeled block: end text includes the label. */
if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).blocks->data[topblock0].label;
blockend0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
blockend0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0);
}
LA1: ;
framelen0 = (*p0).blocks->data[topblock0].framelen;
{
TY178507 LOC12;
if (!(((NI16) 0) < framelen0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178401_2381377266(((NI64) (framelen0)));
addf_179205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1);
}
LA10: ;
endblock_544035_839829468(p0, blockend0);
}
/* Generates code for a Nim `block` statement/expression.
 * If the node has a non-empty type and the destination loc is unset
 * (kind 0), a temporary is allocated for the result. Saves/restores
 * p0->breakidx around the nested block so `break` resolves correctly.
 * If son[0] is a symbol node (labeled block), the symbol's loc is marked
 * with kind 10 and its position recorded as breakidx+1 -- presumably so a
 * later `break label` can find the block; TODO confirm. */
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI oldbreakidx_546099_839829468;
TY533289 LOC8;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
oldbreakidx_546099_839829468 = (*p0).breakidx;
memset((void*)LOC8, 0, sizeof(LOC8));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0);
{
Tsym292834* sym0;
/* Only when son[0] is NOT node kind 1 (i.e. the block has a label symbol). */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
(*sym0).loc.k = ((Tlockind292808) 10);
(*sym0).position = (NI)((*p0).breakidx + ((NI) 1));
}
LA11: ;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546099_839829468;
}
/* Generates a statement-list expression: sons 0..len-2 are emitted as plain
 * statements; the last son (when the list is non-empty) is emitted as an
 * expression whose value goes into destination d0. */
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI length0;
length0 = sonslen_295351_850551059(n0);
{
NI i_558420_839829468;
NI HEX3Atmp_558424_839829468;
NI res_558427_839829468;
i_558420_839829468 = (NI)0;
HEX3Atmp_558424_839829468 = (NI)0;
/* Upper bound: length-2, so the loop stops before the last son. */
HEX3Atmp_558424_839829468 = (NI)(length0 - ((NI) 2));
res_558427_839829468 = ((NI) 0);
{
while (1) {
if (!(res_558427_839829468 <= HEX3Atmp_558424_839829468)) goto LA3;
i_558420_839829468 = res_558427_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_558420_839829468]);
res_558427_839829468 += ((NI) 1);
} LA3: ;
}
}
{
/* The final son produces the expression's value (guarded against empty list). */
if (!(((NI) 0) < length0)) goto LA6;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
}
LA6: ;
}
/* Generates code for a Nim `if`/`elif`/`else` chain (statement or
 * expression form). Each 2-son branch is a condition + body: a block is
 * opened, the condition evaluated, a conditional jump to lelse emitted,
 * the body generated, then (when more branches exist) a jump to lend.
 * A 1-son branch is the `else` body. Anything else is an internal error.
 * lend is fixed up at the end only when there is more than one branch. */
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ropeobj178006* lelse0;
Ropeobj178006* lend0;
memset((void*)(&a0), 0, sizeof(a0));
lelse0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
/* If the expression has a non-empty type and d0 is unset, get a temp for the result. */
LOC4 = isemptytype_297441_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, n0);
lend0 = getlabel_539217_839829468(p0);
{
NI i_545011_839829468;
NI HEX3Atmp_545435_839829468;
NI LOC9;
NI res_545438_839829468;
i_545011_839829468 = (NI)0;
HEX3Atmp_545435_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059(n0);
HEX3Atmp_545435_839829468 = (NI)(LOC9 - ((NI) 1));
res_545438_839829468 = ((NI) 0);
{
/* Iterate over all branches of the if-chain. */
while (1) {
Tnode292802* it0;
if (!(res_545438_839829468 <= HEX3Atmp_545435_839829468)) goto LA11;
i_545011_839829468 = res_545438_839829468;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
/* Reset a loc of kind 1 back to unset when the if has an empty type. */
LOC14 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC14)) goto LA15;
LOC14 = isemptytype_297441_850551059((*n0).typ);
LA15: ;
if (!LOC14) goto LA16;
(*d0).k = ((Tlockind292808) 0);
}
LA16: ;
it0 = (*n0).kindU.S6.sons->data[i_545011_839829468];
{
NI LOC20;
TY533289 LOC23;
NI LOC24;
TY532811 LOC25;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(it0);
/* 2-son branch: condition + body. */
if (!(LOC20 == ((NI) 2))) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC24 = (NI)0;
LOC24 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0);
initlocexprsingleuse_539289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0));
lelse0 = getlabel_539217_839829468(p0);
(*p0).labels += ((NI) 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468((&a0));
LOC25[1] = lelse0;
/* Emit conditional jump to lelse when the condition is false. */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2);
{
NIM_BOOL LOC28;
Ropeobj178006** LOC32;
Ropeobj178006** LOC33;
LOC28 = (NIM_BOOL)0;
/* Special path when gcmd == 2 or module flag bit 27 is set -- presumably
 * the JS/compile-to-something-else or "compileToCpp"-style mode; TODO
 * confirm. The body is wrapped between two extra emitted fragments. */
LOC28 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC28) goto LA29;
LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA29: ;
if (!LOC28) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223));
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
LOC33 = (Ropeobj178006**)0;
LOC33 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280));
}
goto LA26;
LA30: ;
{
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
}
LA26: ;
endblock_544060_839829468(p0);
{
NI LOC37;
TY178507 LOC40;
LOC37 = (NI)0;
LOC37 = sonslen_295351_850551059(n0);
/* With more than one branch, jump to the end label after the body. */
if (!(((NI) 1) < LOC37)) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1);
}
LA38: ;
fixlabel_539230_839829468(p0, lelse0);
}
goto LA18;
LA21: ;
{
NI LOC42;
TY533289 LOC45;
NI LOC46;
LOC42 = (NI)0;
LOC42 = len_293081_850551059(it0);
/* 1-son branch: the else part. */
if (!(LOC42 == ((NI) 1))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0);
endblock_544060_839829468(p0);
}
goto LA18;
LA43: ;
{
/* Malformed branch arity -> internal error. */
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557));
}
LA18: ;
res_545438_839829468 += ((NI) 1);
} LA11: ;
}
}
{
NI LOC50;
LOC50 = (NI)0;
LOC50 = sonslen_295351_850551059(n0);
if (!(((NI) 1) < LOC50)) goto LA51;
fixlabel_539230_839829468(p0, lend0);
}
LA51: ;
}
/* Generates code for a down-conversion (object to one of its sub/ancestor
 * types -- name suggests Nim's `downConv`; TODO confirm). In the special
 * backend mode (gcmd == 2 or module flag 27) the conversion is a no-op and
 * the argument is emitted directly. Otherwise the source expression is
 * read, `->Sup`-style accessors (templates T839829468_558/153) are chained
 * once per inheritance level (abs of inheritancediff), and the result is
 * either stored via a temp (ref-typed unset destination) or put into the
 * destination directly. */
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* Special mode: conversion is transparent. */
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA5: ;
{
Ttype292840* dest0;
Tnode292802* arg0;
Ttype292840* src0;
Tloc292816 a0;
Ropeobj178006* r0;
NIM_BOOL isref0;
Ttype292840* LOC10;
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
arg0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
/* Skip nested conversion nodes (kind 66). */
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 66))) goto LA9;
arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)];
} LA9: ;
}
src0 = skiptypes_296099_850551059((*arg0).typ, IL64(211106247256320));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, arg0, (&a0));
r0 = rdloc_538188_839829468((&a0));
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059((*arg0).typ, IL64(211106232576256));
/* isref: the (skipped) argument type's kind is one of bits 21..23 of the mask. */
isref0 = ((14680064 &((NU64)1<<((NU)((*LOC10).kind)&63U)))!=0);
{
if (!isref0) goto LA13;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_558));
}
goto LA11;
LA13: ;
{
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
{
NI i_558650_839829468;
NI HEX3Atmp_558677_839829468;
NI LOC17;
NI res_558680_839829468;
i_558650_839829468 = (NI)0;
HEX3Atmp_558677_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = inheritancediff_326252_3876443242(dest0, src0);
/* abs(inheritancediff): one extra accessor per remaining level, from 2 up. */
HEX3Atmp_558677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17));
res_558680_839829468 = ((NI) 2);
{
while (1) {
if (!(res_558680_839829468 <= HEX3Atmp_558677_839829468)) goto LA19;
i_558650_839829468 = res_558680_839829468;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
res_558680_839829468 += ((NI) 1);
} LA19: ;
}
}
{
if (!isref0) goto LA22;
{
NIM_BOOL LOC26;
Ttype292840* LOC28;
TY532811 LOC31;
LOC26 = (NIM_BOOL)0;
/* ref result with an unset destination of ref-ish kind: materialize via temp. */
LOC26 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC26)) goto LA27;
LOC28 = (Ttype292840*)0;
LOC28 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC26 = ((14680064 &((NU64)1<<((NU)((*LOC28).kind)&63U)))!=0);
LA27: ;
if (!LOC26) goto LA29;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = rdloc_538188_839829468((&(*d0)));
LOC31[1] = r0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2);
}
goto LA24;
LA29: ;
{
/* Otherwise wrap with prefix template T839829468_52 and store directly. */
r0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), r0);
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA24: ;
}
goto LA20;
LA22: ;
{
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA20: ;
}
LA1: ;
}
/* Generates code for an up-conversion (conversion towards a more derived
 * object type -- name suggests Nim's `upConv`; TODO confirm). When object
 * checks are enabled (option bit 1) and the destination type carries a type
 * field, it walks the ref/ptr chain of the source type collecting a
 * nil-check expression and the dereference chain, then emits a checked
 * conversion (template 560 with nil check, 561 without), passing the
 * destination's RTTI. Finally the converted value is cast: by-value cast
 * (template 430) unless the source son's type is an object (kind 17), in
 * which case the address is cast instead (template 429). */
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
/* Checked path: option bit 1 set AND dest is not an object lacking a type field. */
LOC3 = (NIM_BOOL)0;
LOC3 = (((*p0).options &(1U<<((NU)(((Toption169009) 1))&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isobjlackingtypefield_533515_839829468(dest0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
r0 = rdloc_538188_839829468((&a0));
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
/* Walk through ref/ptr-like kinds (bitmask 14680064 over t0->kind). */
while (1) {
Ttype292840* LOC23;
if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA9;
{
/* Remember the last non-kind-23 level for the nil check. */
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA12;
nilcheck0 = r0;
}
LA12: ;
{
NIM_BOOL LOC16;
NIM_BOOL LOC18;
TY178507 LOC22;
LOC16 = (NIM_BOOL)0;
LOC16 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC16 = !(LOC18);
LA17: ;
if (!LOC16) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = r0;
/* Wrap r0 in a dereference via template T839829468_124. */
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1);
}
LA20: ;
LOC23 = (Ttype292840*)0;
LOC23 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC23, IL64(211106232576256));
} LA9: ;
}
{
NIM_BOOL LOC26;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
/* Only outside the special backend mode: follow inheritance (kind 17 with
 * a non-nil first son) appending one accessor per base class level. */
if (!!(LOC26)) goto LA28;
{
while (1) {
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC32)) goto LA33;
LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA33: ;
if (!LOC32) goto LA31;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA31: ;
}
}
LA28: ;
{
TY535238 LOC38;
/* Emit the object-check call: with nil check (template 560)... */
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = r0;
LOC38[2] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3);
}
goto LA34;
LA36: ;
{
TY532811 LOC40;
/* ... or without (template 561). */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = r0;
LOC40[1] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2);
}
LA34: ;
}
LA6: ;
{
TY532811 LOC45;
Ropeobj178006* LOC46;
/* Non-object source (son type kind != 17): value cast via template 430. */
if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind292244) 17)))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC45[1] = rdloc_538188_839829468((&a0));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC46, a0.s);
}
goto LA41;
LA43: ;
{
TY532811 LOC48;
Ropeobj178006* LOC49;
/* Object source: cast the address instead (template 429). */
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC48[1] = addrloc_538204_839829468((&a0));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC49, a0.s);
}
LA41: ;
}
/* Generates a range-checked conversion. If range checks are disabled
 * (option bit 3 off) or the destination's (further-skipped) kind is in the
 * exempt mask, a plain cast (template 430) is emitted. Otherwise a runtime
 * check call (template 562) is emitted with the type descriptor, the read
 * value, the low/high bound literals from sons 1 and 2, and the name of the
 * check routine passed in magic0. */
N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
TY532811 LOC8;
Ropeobj178006* LOC9;
LOC3 = (NIM_BOOL)0;
/* Unchecked path: range checks off, or dest kind in mask 34084860461056. */
LOC3 = !((((*p0).options &(1U<<((NU)(((Toption169009) 3))&31U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0, 1048576);
LOC3 = ((IL64(34084860461056) &((NU64)1<<((NU)((*LOC5).kind)&63U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC8[1] = rdcharloc_538227_839829468((&a0));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC9, a0.s);
}
goto LA1;
LA6: ;
{
TY536475 LOC11;
Ropeobj178006* LOC12;
/* Checked path: emit <magic>(typedesc, value, low, high) via template 562. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC11[1] = rdcharloc_538227_839829468((&a0));
LOC11[2] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0);
LOC11[3] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0);
LOC11[4] = rope_178277_2381377266(magic0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5);
putintodest_550468_839829468(p0, d0, dest0, LOC12, a0.s);
}
LA1: ;
}
/* Generates the conversion of a Nim string to a cstring: reads the source
 * loc and wraps it with template T839829468_485 (presumably extracting the
 * data pointer -- TODO confirm), storing into the destination. */
N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* LOC1;
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468((&a0));
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1);
putintodest_550468_839829468(p0, d0, LOC1, LOC3, a0.s);
}
/* Generates the conversion of a cstring to a Nim string: wraps the read
 * source with template T839829468_411 (presumably a cstrToNimstr runtime
 * call -- TODO confirm) and records GC usage for the node. */
N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* LOC1;
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468((&a0));
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), LOC2, 1);
putintodest_550468_839829468(p0, d0, LOC1, LOC3, a0.s);
gcusage_554439_839829468(n0);
}
/* Pure predicate: true when the symbol's kind falls inside the routine-kind
 * bitmask 258048 (0x3F000, i.e. kind values 12..17 -- presumably the Nim
 * skProc..skConverter range; TODO confirm). */
static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0) {
	NU kindbit0 = (NU)((*s0).kind) & 31U;
	return (NIM_BOOL)((258048 & (1U << kindbit0)) != 0);
}
/* Pure predicate: a node is a "constant closure" when its first son is a
 * symbol node (kind 3) whose symbol is a routine, and its second son has
 * node kind 23 (presumably a nil literal environment -- TODO confirm).
 * Short-circuit evaluation order matches the original lowered form. */
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0) {
	Tnode292802* fn0 = (*n0).kindU.S6.sons->data[((NI) 0)];
	if (!((*fn0).kind == ((Tnodekind292020) 3))) {
		return NIM_FALSE;
	}
	if (!isroutine_297324_850551059((*fn0).kindU.S4.sym)) {
		return NIM_FALSE;
	}
	return ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 23));
}
/* Generates code constructing a closure value (proc pointer + environment).
 * Constant closures get a fresh module-level constant (labelled tmp via the
 * module label counter) emitted into module section 8 and referenced as the
 * destination. Otherwise both sons are evaluated, a sanity check rejects a
 * son[0] of node kind 155 (internal error), a temp pair is filled via
 * template T839829468_568 and put into the destination. */
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
Ropeobj178006* tmp0;
Ropeobj178006* LOC6;
TY535238 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = isconstclosure_557810_839829468(n0);
if (!LOC3) goto LA4;
/* Constant-closure path: emit a named const into the module. */
(*(*p0).module).labels += ((NI) 1);
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
tmp0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_566), LOC6);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3);
putintodest_550468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc292812) 1));
}
goto LA1;
LA4: ;
{
Tloc292816 tmp0;
Tloc292816 a0;
Tloc292816 b0;
TY535238 LOC14;
/* Runtime path: evaluate proc (a0) and environment (b0) expressions. */
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0));
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]);
/* A son[0] of node kind 155 is unexpected here -> internal error. */
if (!((*LOC11).kind == ((Tnodekind292020) 155))) goto LA12;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567));
}
LA12: ;
gettemp_537032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&tmp0));
LOC14[1] = rdloc_538188_839829468((&a0));
LOC14[2] = rdloc_538188_839829468((&b0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3);
putlocintodest_539258_839829468(p0, d0, (&tmp0));
}
LA1: ;
}
/* Builds the C label rope for block b from its numeric id (prefix template
 * T839829468_296 + id), stores it into b->label through the GC write
 * barrier unsureAsgnRef, and returns the stored rope. */
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0) {
	Ropeobj178006* idrope0 = rope_178401_2381377266(((NI64) ((*b0).id)));
	unsureAsgnRef((void**) (&(*b0).label), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), idrope0));
	return (*b0).label;
}
/* Generates a computed-goto dispatch loop from a case statement inside a
 * loop body (the Nim {.computedGoto.} optimization -- name-based inference,
 * TODO confirm). Steps, as visible below:
 *  1. Scan sons for the case statement (node kind 97); validate that its
 *     last branch is kind 85, the selector range is <= 10000 and starts at
 *     ordinal 0; localerror + early return otherwise.
 *  2. Reserve arraysize+1 labels and build the static goto-target array
 *     (templates 574..577) into proc section 0.
 *  3. Temporarily detach the top block's section 2 to capture three rope
 *     fragments: oldbody (pre-existing), tailb (statements after the case),
 *     taila (statements before the case), then restore oldbody & taila.
 *  4. Evaluate the selector, emit the initial dispatch (template 578), and
 *     for each case branch emit its labels (template 580), body, the tail
 *     fragments, a re-evaluated selector and the next dispatch. */
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI casepos0;
NI arraysize0;
NI id0;
Ropeobj178006* tmp0;
TY178507 LOC27;
Ropeobj178006* gotoarray0;
TY532811 LOC28;
TY178507 LOC33;
NI topblock0;
Ropeobj178006* oldbody0;
Ropeobj178006* tailb0;
Ropeobj178006* taila0;
Tnode292802* casestmt0;
Tloc292816 a_545871_839829468;
TY532811 LOC41;
{ casepos0 = ((NI) -1);
arraysize0 = (NI)0;
{
NI i_545768_839829468;
NI HEX3Atmp_545934_839829468;
NI LOC2;
NI res_545937_839829468;
i_545768_839829468 = (NI)0;
HEX3Atmp_545934_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_545934_839829468 = (LOC2 - 1);
res_545937_839829468 = ((NI) 0);
{
/* Step 1: locate and validate the case statement among the sons. */
while (1) {
Tnode292802* it0;
if (!(res_545937_839829468 <= HEX3Atmp_545934_839829468)) goto LA4;
i_545768_839829468 = res_545937_839829468;
it0 = (*n0).kindU.S6.sons->data[i_545768_839829468];
{
NI64 asize0;
if (!((*it0).kind == ((Tnodekind292020) 97))) goto LA7;
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = lastson_295364_850551059(it0);
/* The last branch must have node kind 85; otherwise report and bail out. */
if (!!(((*LOC11).kind == ((Tnodekind292020) 85)))) goto LA12;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570));
goto BeforeRet;
}
LA12: ;
casepos0 = i_545768_839829468;
asize0 = lengthord_320007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
{
/* Selector range capped at 10000 entries. */
if (!(IL64(10000) < asize0)) goto LA16;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571));
goto BeforeRet;
}
LA16: ;
arraysize0 = ((NI) (asize0));
{
NI64 LOC20;
LOC20 = (NI64)0;
LOC20 = firstord_320001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
/* Selector type must start at ordinal 0. */
if (!!((LOC20 == IL64(0)))) goto LA21;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572));
goto BeforeRet;
}
LA21: ;
}
LA7: ;
res_545937_839829468 += ((NI) 1);
} LA4: ;
}
}
{
/* No case statement found at all -> error. */
if (!(casepos0 < ((NI) 0))) goto LA25;
localerror_196085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573));
goto BeforeRet;
}
LA25: ;
/* Step 2: reserve labels and build the goto-target array literal. */
id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1));
(*p0).labels += (NI)(arraysize0 + ((NI) 1));
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1);
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = tmp0;
LOC28[1] = rope_178401_2381377266(((NI64) (arraysize0)));
gotoarray0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2);
{
NI i_545819_839829468;
NI HEX3Atmp_545942_839829468;
NI res_545945_839829468;
i_545819_839829468 = (NI)0;
HEX3Atmp_545942_839829468 = (NI)0;
HEX3Atmp_545942_839829468 = (NI)(arraysize0 - ((NI) 1));
res_545945_839829468 = ((NI) 1);
{
/* Entries 1..arraysize-1 of the label array (template 576). */
while (1) {
TY178507 LOC32;
if (!(res_545945_839829468 <= HEX3Atmp_545942_839829468)) goto LA31;
i_545819_839829468 = res_545945_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_545819_839829468))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1);
res_545945_839829468 += ((NI) 1);
} LA31: ;
}
}
/* Final entry + closing text (template 577), placed in proc section 0. */
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1);
line_532690_839829468(p0, ((Tcprocsection529011) 0), gotoarray0);
/* Step 3: capture body fragments by swapping out the top block's section 2.
 * asgnRefNoCycle is the GC write barrier; call order here is load-bearing. */
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545854_839829468;
NI HEX3Atmp_545950_839829468;
NI HEX3Atmp_545951_839829468;
NI LOC35;
NI res_545954_839829468;
j_545854_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)0;
HEX3Atmp_545951_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)(casepos0 + ((NI) 1));
LOC35 = (NI)0;
LOC35 = len_293081_850551059(n0);
HEX3Atmp_545951_839829468 = (LOC35 - 1);
res_545954_839829468 = HEX3Atmp_545950_839829468;
{
/* Generate the statements AFTER the case statement -> captured as tailb. */
while (1) {
if (!(res_545954_839829468 <= HEX3Atmp_545951_839829468)) goto LA37;
j_545854_839829468 = res_545954_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545854_839829468]);
res_545954_839829468 += ((NI) 1);
} LA37: ;
}
}
tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545866_839829468;
NI HEX3Atmp_545959_839829468;
NI res_545962_839829468;
j_545866_839829468 = (NI)0;
HEX3Atmp_545959_839829468 = (NI)0;
HEX3Atmp_545959_839829468 = (NI)(casepos0 - ((NI) 1));
res_545962_839829468 = ((NI) 0);
{
/* Generate the statements BEFORE the case statement -> captured as taila. */
while (1) {
if (!(res_545962_839829468 <= HEX3Atmp_545959_839829468)) goto LA40;
j_545866_839829468 = res_545962_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545866_839829468]);
res_545962_839829468 += ((NI) 1);
} LA40: ;
}
}
taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
/* Restore section 2 as oldbody & taila. */
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), HEX26_178418_2381377266(oldbody0, taila0));
/* Step 4: evaluate the selector and emit the initial dispatch. */
casestmt0 = (*n0).kindU.S6.sons->data[casepos0];
memset((void*)(&a_545871_839829468), 0, sizeof(a_545871_839829468));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_545871_839829468));
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = rdloc_538188_839829468((&a_545871_839829468));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2);
{
NI i_545894_839829468;
NI HEX3Atmp_545978_839829468;
NI LOC43;
NI res_545981_839829468;
i_545894_839829468 = (NI)0;
HEX3Atmp_545978_839829468 = (NI)0;
LOC43 = (NI)0;
LOC43 = len_293081_850551059(casestmt0);
HEX3Atmp_545978_839829468 = (LOC43 - 1);
res_545981_839829468 = ((NI) 1);
{
/* One iteration per case branch (sons 1..len-1). */
while (1) {
TY533289 LOC46;
NI LOC47;
Tnode292802* it0;
Tnode292802* LOC57;
Ropeobj178006** LOC58;
Ropeobj178006** LOC59;
Tloc292816 a0;
TY532811 LOC60;
if (!(res_545981_839829468 <= HEX3Atmp_545978_839829468)) goto LA45;
i_545894_839829468 = res_545981_839829468;
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (NI)0;
LOC47 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545894_839829468];
{
NI j_545910_839829468;
NI HEX3Atmp_545970_839829468;
NI LOC49;
NI res_545973_839829468;
j_545910_839829468 = (NI)0;
HEX3Atmp_545970_839829468 = (NI)0;
LOC49 = (NI)0;
LOC49 = len_293081_850551059(it0);
HEX3Atmp_545970_839829468 = (NI)(LOC49 - ((NI) 2));
res_545973_839829468 = ((NI) 0);
{
/* Emit a goto label per case value (sons 0..len-2 of the branch). */
while (1) {
NI64 val0;
TY178507 LOC56;
if (!(res_545973_839829468 <= HEX3Atmp_545970_839829468)) goto LA51;
j_545910_839829468 = res_545973_839829468;
{
/* Range values (node kind 44) are not supported here. */
if (!((*(*it0).kindU.S6.sons->data[j_545910_839829468]).kind == ((Tnodekind292020) 44))) goto LA54;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA54: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545910_839829468]);
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = intliteral_539270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1);
res_545973_839829468 += ((NI) 1);
} LA51: ;
}
}
/* Branch body, then the captured tail/head fragments, then re-dispatch. */
LOC57 = (Tnode292802*)0;
LOC57 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC57);
LOC58 = (Ropeobj178006**)0;
LOC58 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC58, tailb0);
LOC59 = (Ropeobj178006**)0;
LOC59 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC59, taila0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC60, 0, sizeof(LOC60));
LOC60[0] = tmp0;
LOC60[1] = rdloc_538188_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2);
endblock_544060_839829468(p0);
res_545981_839829468 += ((NI) 1);
} LA45: ;
}
}
}BeforeRet: ;
}
/* Generates a Nim `while` statement. Opens a loop block (isloop = true),
 * evaluates the condition, and — unless the condition is the literal
 * int 0 test special-case below — emits a conditional break-out jump
 * (template 555) to the block's label. If the loop body carries the
 * computed-goto pragma (special word 182) and the C compiler supports it
 * (Cc table Field20 bit 1), dispatches to gencomputedgoto, unwrapping a
 * 2-son wrapper whose first son is node kind 1; otherwise plain genstmts.
 * Option bit 19 additionally emits template 581 at the loop end
 * (presumably a GC interior-check or yield hook -- TODO confirm). */
N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
NI oldbreakidx_546011_839829468;
TY533289 LOC1;
Tnode292802* loopbody0;
memset((void*)(&a0), 0, sizeof(a0));
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
oldbreakidx_546011_839829468 = (*p0).breakidx;
memset((void*)LOC1, 0, sizeof(LOC1));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
NIM_BOOL LOC4;
Ropeobj178006* label0;
TY532811 LOC8;
LOC4 = (NIM_BOOL)0;
/* Emit the exit jump unless the condition is a literal `true`-like int
 * (node kind 6 with intval 0 is excluded by this compound test). */
LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6)));
if (LOC4) goto LA5;
LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0));
LA5: ;
if (!LOC4) goto LA6;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[(*p0).breakidx]));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC8[1] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2);
}
LA6: ;
loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)];
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = stmtscontainpragma_528083_2036603609(loopbody0, ((Tspecialword275003) 182));
if (!(LOC11)) goto LA12;
LOC11 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 1))&7U)))!=0);
LA12: ;
if (!LOC11) goto LA13;
{
NIM_BOOL LOC17;
NI LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NI)0;
LOC18 = len_293081_850551059(loopbody0);
LOC17 = (LOC18 == ((NI) 2));
if (!(LOC17)) goto LA19;
LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1));
LA19: ;
if (!LOC17) goto LA20;
/* Unwrap {pragma, realBody} pair before generating the computed goto. */
loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)];
}
LA20: ;
gencomputedgoto_545744_839829468(p0, loopbody0);
}
goto LA9;
LA13: ;
{
genstmts_539244_839829468(p0, loopbody0);
}
LA9: ;
{
TY533289 LOC27;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0);
}
LA25: ;
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546011_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Generates a goto to a variable target (presumably the {.goto.} feature
 * -- TODO confirm). The target must be a literal node (kinds 5..15);
 * otherwise localerror with template 582. On success the literal's int
 * value is formatted into template 583 in proc section 2. */
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0) {
{
if (!!(((*value0).kind >= ((Tnodekind292020) 5) && (*value0).kind <= ((Tnodekind292020) 15)))) goto LA3;
localerror_196085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582));
}
goto LA1;
LA3: ;
{
TY178507 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266((*value0).kindU.S1.intval);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1);
}
LA1: ;
}
/* Generates the access path for a variable imported from a dynamic
 * library: ensures the lib is loaded, marks the symbol's loc (flag bit 0),
 * replaces its loc rope with a mangled dynlib proxy name (through the
 * asgnRefNoCycle GC barrier), and emits both the dlsym-style lookup
 * (template 584, module section 16) and the proxy declaration
 * (template 585, module section 9). */
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY532811 LOC3;
lib0 = (*sym0).annex;
extname0 = (*sym0).loc.r;
loaddynamiclib_559481_839829468(m0, lib0);
(*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
(*m0).labels += ((NI) 2);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC1[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC1[2] = (*lib0).name;
LOC2 = (NimStringDesc*)0;
/* The original external name becomes a C string literal for the lookup. */
LOC2 = HEX24_178856_2381377266(extname0);
LOC1[3] = makecstring_191638_155036129(LOC2);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = (*sym0).loc.r;
LOC3[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2);
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `assignGlobalVar`; inferred from the mangled name -- confirm against the
 * Nim compiler sources).  Declares a global variable in the generated C:
 * fills in the symbol's location if still unset, routes dynlib-imported
 * globals to varindynamiclib, skips header-imported symbols, declares
 * threadvars specially, and otherwise builds the C declaration (optionally
 * via a user-supplied codegen constraint string).  May also register the
 * variable for stack-trace/debug metadata when the matching options are set.
 */
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{ {
Ropeobj178006* LOC5;
/* Location kind 0 == unset: fill it in with the mangled global name. */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 3), (*s0).typ, LOC5, ((Tstorageloc292812) 3));
}
LA3: ;
{
Tcgen529027* q0;
/* Dynlib-imported global: generate loader code in its defining module. */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA8;
q0 = findpendingmodule_532241_839829468((*p0).module, s0);
{
NIM_BOOL LOC12;
NIM_BOOL LOC14;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*s0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
varindynamiclib_538812_839829468(q0, s0);
}
goto LA10;
LA15: ;
{
/* Already declared elsewhere: only rewrite loc.r to the dynlib name. */
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_538816_839829468(s0));
}
LA10: ;
goto BeforeRet;
}
LA8: ;
useheader_532369_839829468((*p0).module, s0);
{
/* Header-imported symbol: no declaration needs to be generated. */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA20;
goto BeforeRet;
}
LA20: ;
{
/* Threadvar flag set: declare via the threadvar machinery. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA24;
declarethreadvar_538676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0));
}
goto LA22;
LA24: ;
{
Ropeobj178006* decl0;
Ropeobj178006* td0;
decl0 = NIM_NIL;
td0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t);
{
TY178507 LOC43;
/* No user constraint: build "<export?> <type> <register?/volatile?> name;". */
if (!(*s0).constraint == 0) goto LA29;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)) goto LA33;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240));
}
LA33: ;
add_178482_2381377266(&decl0, td0);
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA37;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121));
}
LA37: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA41;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*s0).loc.r;
addf_179205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1);
}
goto LA27;
LA29: ;
{
/* Constraint present: format the user string with type and name appended. */
NimStringDesc* LOC45;
TY532811 LOC46;
LOC45 = (NimStringDesc*)0;
LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3);
appendString(LOC45, (*(*s0).constraint).kindU.S3.strval);
appendString(LOC45, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC46, 0, sizeof(LOC46));
LOC46[0] = td0;
LOC46[1] = (*s0).loc.r;
decl0 = HEX25_178905_2381377266(LOC45, LOC46, 2);
}
LA27: ;
add_178482_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], decl0);
}
LA22: ;
{
/* Inside a loop the location must be reset on each iteration. */
if (!(((NI) 0) < (*p0).withinloop)) goto LA49;
resetloc_538350_839829468(p0, (&(*s0).loc));
}
LA49: ;
{
TY535238 LOC55;
NimStringDesc* LOC56;
NimStringDesc* LOC57;
/* NOTE(review): 163840 looks like a two-bit option mask (debug/metadata
   options) -- confirm against the Toption enum in the Nim sources. */
if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1);
appendString(LOC56, (*(*(*s0).owner).name).s);
appendChar(LOC56, 46);
appendString(LOC56, (*(*s0).name).s);
LOC57 = (NimStringDesc*)0;
LOC57 = nsuNormalize(LOC56);
LOC55[0] = makecstring_191638_155036129(LOC57);
LOC55[1] = (*s0).loc.r;
LOC55[2] = gentypeinfo_535941_839829468((*p0).module, (*s0).typ);
appcg_532632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection529005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3);
}
LA53: ;
}BeforeRet: ;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genTraverseProcForGlobal`; inferred from the mangled name -- confirm
 * against the Nim compiler sources).  Synthesizes a GC "traverse" helper
 * function for one global variable and appends its prototype and body to
 * the module's file sections.  Returns the rope holding the generated
 * helper's name.
 */
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* sloc0;
Ropeobj178006* header0;
TY178507 LOC8;
Ropeobj178006* generatedproc0;
TY535235 LOC9;
Ropeobj178006** LOC10;
Ropeobj178006** LOC11;
Ropeobj178006** LOC12;
TY178507 LOC13;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468(m0, (*s0).loc.t);
memset((void*)(&c0), 0, sizeof(c0));
/* A fresh helper proc context; its sections are assembled below. */
p0 = newproc_529206_3723162438(NIM_NIL, m0);
sloc0 = (*s0).loc.r;
result0 = gettempname_533598_839829468(m0);
{
NIM_BOOL LOC4;
/* Emulated threadvars need TLS access code and an address-of wrapper. */
LOC4 = (NIM_BOOL)0;
LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = emulatedthreadvars_532949_839829468();
LA5: ;
if (!LOC4) goto LA6;
accessthreadlocalvar_532945_839829468(p0, s0);
sloc0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), sloc0);
}
LA6: ;
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587));
c0.p = p0;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1);
gentraverseproc_537022_839829468((&c0), sloc0, (*s0).loc.t);
/* Stitch together: header + the helper proc's three code sections. */
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = header0;
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC9[1] = (*LOC10);
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC9[2] = (*LOC11);
LOC12 = (Ropeobj178006**)0;
LOC12 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC9[3] = (*LOC12);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = header0;
/* Prototype into section 7, full body into section 10. */
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `registerGCRoot`; inferred from the mangled name -- confirm against the
 * Nim compiler sources).  If the selected GC strategy requires it (bitmask
 * 240 over gselectedgc) and the variable's type contains GC'ed references,
 * generates a traverse helper for the global and emits a registration call
 * into the module init proc.
 */
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0) {
{
NIM_BOOL LOC3;
Ropeobj178006* prc0;
Ropeobj178006** LOC7;
TY178507 LOC8;
LOC3 = (NIM_BOOL)0;
/* 240 == bits 4..7: only certain GC modes need explicit root registration.
   NOTE(review): which modes exactly -- confirm against the Tgcmode enum. */
LOC3 = ((240 &(1U<<((NU)(gselectedgc_169133_2607990831)&7U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = containsgarbagecollectedref_320117_3876443242((*v0).loc.t);
LA4: ;
if (!LOC3) goto LA5;
prc0 = gentraverseprocforglobal_538032_839829468((*p0).module, v0);
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438((*(*p0).module).initproc, ((Tcprocsection529011) 1));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = prc0;
appcg_532632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1);
}
LA5: ;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `isAssignedImmediately`; inferred from the mangled name -- confirm against
 * the Nim compiler sources).  Reports whether the initialiser node can be
 * assigned at the point of declaration: false for the empty node (kind 1)
 * and for values whose type is invalid as a direct C return type; true
 * otherwise.
 */
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0) {
	if ((*n0).kind == ((Tnodekind292020) 1)) {
		return NIM_FALSE;
	}
	if (isinvalidreturntype_533550_839829468((*n0).typ)) {
		return NIM_FALSE;
	}
	return NIM_TRUE;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `genAsgnCall`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Dispatches code generation for a call whose result is assigned
 * somewhere: closure calls, infix calls, named-parameter calls, or the
 * default prefix-call form; always runs post-statement actions afterwards.
 */
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
{
Ttype292840* LOC3;
/* Callee has closure calling convention (8): special closure call path. */
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
genclosurecall_540452_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC7;
/* Callee is a symbol (kind 3) flagged for infix emission (flag 27). */
LOC7 = (NIM_BOOL)0;
LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
geninfixcall_541929_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA9: ;
{
NIM_BOOL LOC12;
/* Callee is a symbol flagged for named-parameter emission (flag 28). */
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC12)) goto LA13;
LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
gennamedparamcall_542616_839829468(p0, ri0, d0);
}
goto LA1;
LA14: ;
{
/* Default: ordinary prefix-style C call. */
genprefixcall_539960_839829468(p0, le0, ri0, d0);
}
LA1: ;
poststmtactions_532942_839829468(p0);
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `loadInto`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Loads the value of `ri0` into location `a0`, picking the
 * cheapest emission strategy:
 *  - a call node (kinds 26..32) whose callee is not a magic symbol goes
 *    through the assignment-call path,
 *  - a deref/hidden-deref node (kinds 47/65) is generated as a dereference,
 *  - everything else falls back to generic expression codegen.
 * Short-circuit order of the original tests is preserved: the callee son is
 * only inspected for call nodes, and its magic only when it is a symbol.
 */
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0) {
	NIM_BOOL isCallKind = ((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32));
	if (isCallKind &&
	    (!(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3))) ||
	     ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic292524) 0)))) {
		genasgncall_543695_839829468(p0, le0, ri0, a0);
	} else if ((*ri0).kind == ((Tnodekind292020) 47) || (*ri0).kind == ((Tnodekind292020) 65)) {
		genderef_543921_839829468(p0, ri0, a0, NIM_TRUE);
	} else {
		expr_539248_839829468(p0, ri0, a0);
	}
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `genSingleVar`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Generates code for one `var x = value` declaration: compile-time
 * only symbols are skipped (possibly emitting a goto-var), globals are
 * declared/initialised/registered as GC roots, and locals get either a fused
 * "T x = init;" declaration or a separate declare-then-assign sequence.
 *
 * FIX(review): two lines below read `add_...(&params0, LOC64/LOC65)`.  In the
 * provided source they had been corrupted to `¶ms0` -- an HTML-entity
 * mangling of `&params0` (`&para` -> the pilcrow character), which is not a
 * valid C identifier and cannot compile.  Restored to `&params0`.
 */
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0) {
Tsym292834* v0;
Tcproc529021* targetproc0;
{ v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* Compile-time-only symbol (mask 1082130432): nothing to emit, except a
   goto-var (flag 30) still produces its jump target assignment. */
if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0)) goto LA7;
gengotovar_544258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]);
}
LA7: ;
goto BeforeRet;
}
LA3: ;
targetproc0 = p0;
{
/* Global variable path (flag 3). */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA11;
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
/* Already-imported global with empty initialiser: nothing to do. */
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC16 = (((*v0).flags & 96) == 32);
if (!(LOC16)) goto LA17;
LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*v0).loc.flags & 72) == 0));
LA18: ;
if (!LOC15) goto LA19;
goto BeforeRet;
}
LA19: ;
{
/* Flag 9: declare/initialise in the pre-init proc instead. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA23;
targetproc0 = (*(*p0).module).preinitproc;
}
LA23: ;
assignglobalvar_538819_839829468(targetproc0, v0);
genobjectinit_538242_839829468((*(*p0).module).preinitproc, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE);
{
NIM_BOOL LOC27;
/* Exported global with a generated header: emit its prototype there. */
LOC27 = (NIM_BOOL)0;
LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC27)) goto LA28;
LOC27 = !((generatedheader_532201_839829468 == NIM_NIL));
LA28: ;
if (!LOC27) goto LA29;
genvarprototypeaux_544254_839829468(generatedheader_532201_839829468, v0);
}
LA29: ;
registergcroot_543762_839829468(p0, v0);
}
goto LA9;
LA11: ;
{
/* Local variable path. */
Tnode292802* value0;
NIM_BOOL imm0;
value0 = (*a0).kindU.S6.sons->data[((NI) 2)];
imm0 = isassignedimmediately_543781_839829468(value0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC35;
NIM_BOOL LOC36;
NIM_BOOL LOC38;
NIM_BOOL LOC42;
Ropeobj178006* decl0;
Tloc292816 tmp0;
/* Fused "T x = init;" form is only used when: the value is assignable
   immediately, we are compiling to C (or the module is marked), we are
   not inside split declarations, and the type holds no hidden pointer. */
LOC34 = (NIM_BOOL)0;
LOC35 = (NIM_BOOL)0;
LOC36 = (NIM_BOOL)0;
LOC36 = imm0;
if (!(LOC36)) goto LA37;
LOC38 = (NIM_BOOL)0;
LOC38 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC38) goto LA39;
LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA39: ;
LOC36 = LOC38;
LA37: ;
LOC35 = LOC36;
if (!(LOC35)) goto LA40;
LOC35 = ((*p0).splitdecls == ((NI) 0));
LA40: ;
LOC34 = LOC35;
if (!(LOC34)) goto LA41;
LOC42 = (NIM_BOOL)0;
LOC42 = containshiddenpointer_320120_3876443242((*v0).typ);
LOC34 = !(LOC42);
LA41: ;
if (!LOC34) goto LA43;
genlinedir_532823_839829468(p0, a0);
decl0 = localvardecl_538532_839829468(p0, v0);
memset((void*)(&tmp0), 0, sizeof(tmp0));
{
NIM_BOOL LOC47;
NIM_BOOL LOC48;
Tnode292802* LOC50;
Tnode292802* LOC52;
Ropeobj178006* params0;
Ttype292840* typ0;
TY532811 LOC66;
/* Constructor-style call (callee flag 24): emit "T x = f(args);" by
   formatting the argument list inline. */
LOC47 = (NIM_BOOL)0;
LOC48 = (NIM_BOOL)0;
LOC48 = ((*value0).kind == ((Tnodekind292020) 27) || (*value0).kind == ((Tnodekind292020) 29) || (*value0).kind == ((Tnodekind292020) 30) || (*value0).kind == ((Tnodekind292020) 31) || (*value0).kind == ((Tnodekind292020) 26) || (*value0).kind == ((Tnodekind292020) 28) || (*value0).kind == ((Tnodekind292020) 32));
if (!(LOC48)) goto LA49;
LOC50 = (Tnode292802*)0;
LOC50 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC48 = ((*LOC50).kind == ((Tnodekind292020) 3));
LA49: ;
LOC47 = LOC48;
if (!(LOC47)) goto LA51;
LOC52 = (Tnode292802*)0;
LOC52 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 24))&31U)))!=0);
LA51: ;
if (!LOC47) goto LA53;
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NI i_544619_839829468;
NI HEX3Atmp_544825_839829468;
NI LOC56;
NI res_544828_839829468;
i_544619_839829468 = (NI)0;
HEX3Atmp_544825_839829468 = (NI)0;
LOC56 = (NI)0;
LOC56 = len_293081_850551059(value0);
HEX3Atmp_544825_839829468 = (LOC56 - 1);
res_544828_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC65;
if (!(res_544828_839829468 <= HEX3Atmp_544825_839829468)) goto LA58;
i_544619_839829468 = res_544828_839829468;
{
TY533289 LOC63;
Ropeobj178006* LOC64;
/* Separate arguments after the first with ", ". */
if (!!((params0 == NIM_NIL))) goto LA61;
memset((void*)LOC63, 0, sizeof(LOC63));
LOC64 = (Ropeobj178006*)0;
LOC64 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0);
add_178482_2381377266(&params0, LOC64);
}
LA61: ;
LOC65 = (Ropeobj178006*)0;
LOC65 = genotherarg_539277_839829468(p0, value0, i_544619_839829468, typ0);
add_178482_2381377266(&params0, LOC65);
res_544828_839829468 += ((NI) 1);
} LA58: ;
}
}
memset((void*)LOC66, 0, sizeof(LOC66));
LOC66[0] = decl0;
LOC66[1] = params0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2);
}
goto LA45;
LA53: ;
{
/* General case: "T x = <expr>;" with the expression in a temp loc. */
TY532811 LOC68;
initlocexprsingleuse_539289_839829468(p0, value0, (&tmp0));
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = decl0;
LOC68[1] = rdloc_538188_839829468((&tmp0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2);
}
LA45: ;
goto BeforeRet;
}
LA43: ;
/* Separate declaration + default initialisation for the local. */
assignlocalvar_538614_839829468(p0, v0);
initlocalvar_538398_839829468(p0, v0, imm0);
}
LA9: ;
{
/* Non-empty initialiser: load its value into the variable's location. */
if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) goto LA71;
genlinedir_532823_839829468(targetproc0, a0);
loadinto_543928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc));
}
LA71: ;
}BeforeRet: ;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genClosureVar`; inferred from the mangled name -- confirm against the
 * Nim compiler sources).  Generates code for a var declaration whose LHS is
 * a closure-captured location (not a plain symbol): when an initial value
 * exists (son 2 is not the empty node, kind 1), the destination location is
 * evaluated and the value is loaded into it.  With no initialiser this is a
 * no-op.
 */
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0) {
	if (!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) {
		Tloc292816 destLoc;
		memset((void*)(&destLoc), 0, sizeof(destLoc));
		initlocexpr_539283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&destLoc));
		genlinedir_532823_839829468(p0, a0);
		loadinto_543928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&destLoc));
	}
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `genVarTuple`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Generates code for tuple unpacking in a var section
 * (`var (a, b) = tup`): if any target is not a plain symbol the statement is
 * lowered to simple assignments first; otherwise each target is declared and
 * bound to the matching tuple field access.
 */
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 tup0;
Tloc292816 field0;
NI L0;
NIM_BOOL uselowering0;
Ttype292840* t0;
{ memset((void*)(&tup0), 0, sizeof(tup0));
memset((void*)(&field0), 0, sizeof(field0));
{
/* Sanity check: the node must be a var-tuple node (kind 36). */
if (!!(((*n0).kind == ((Tnodekind292020) 36)))) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA3: ;
L0 = sonslen_295351_850551059(n0);
uselowering0 = NIM_FALSE;
{
/* First pass: targets are sons [0, L-3]; lower if any is not a symbol. */
NI i_543822_839829468;
NI HEX3Atmp_543905_839829468;
NI res_543908_839829468;
i_543822_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)(L0 - ((NI) 3));
res_543908_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA7;
i_543822_839829468 = res_543908_839829468;
{
Tnode292802* LOC10;
LOC10 = (Tnode292802*)0;
LOC10 = HEX5BHEX5D_293238_850551059(n0, i_543822_839829468);
if (!!(((*LOC10).kind == ((Tnodekind292020) 3)))) goto LA11;
uselowering0 = NIM_TRUE;
goto LA5;
}
LA11: ;
res_543908_839829468 += ((NI) 1);
} LA7: ;
}
} LA5: ;
{
Tnode292802* LOC17;
/* Complex targets: rewrite the unpacking into plain statements. */
if (!uselowering0) goto LA15;
LOC17 = (Tnode292802*)0;
LOC17 = lowertupleunpacking_433037_2218250499(n0, (*p0).prc);
genstmts_539244_839829468(p0, LOC17);
goto BeforeRet;
}
LA15: ;
genlinedir_532823_839829468(p0, n0);
/* Evaluate the RHS tuple (last son) once into tup0. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0));
t0 = getuniquetype_528640_2036603609(tup0.t);
{
/* Second pass: declare each target and assign the matching field. */
NI i_543846_839829468;
NI HEX3Atmp_543914_839829468;
NI res_543917_839829468;
i_543846_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)(L0 - ((NI) 3));
res_543917_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543917_839829468 <= HEX3Atmp_543914_839829468)) goto LA20;
i_543846_839829468 = res_543917_839829468;
{
Tsym292834* v0;
v0 = (*(*n0).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym;
{
/* Flag 23: compile-time-only target, skip entirely. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA24;
goto LA21;
}
LA24: ;
{
/* Global target (flag 3): declare, init, register as GC root. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA28;
assignglobalvar_538819_839829468(p0, v0);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE);
registergcroot_543762_839829468(p0, v0);
}
goto LA26;
LA28: ;
{
/* Local target: declare and default-initialise. */
Tnode292802* LOC31;
NIM_BOOL LOC32;
assignlocalvar_538614_839829468(p0, v0);
LOC31 = (Tnode292802*)0;
LOC31 = HEX5BHEX5D_293238_850551059(n0, (NI)(L0 - ((NI) 1)));
LOC32 = (NIM_BOOL)0;
LOC32 = isassignedimmediately_543781_839829468(LOC31);
initlocalvar_538398_839829468(p0, v0, LOC32);
}
LA26: ;
initloc_532273_839829468((&field0), ((Tlockind292808) 6), (*t0).sons->data[i_543846_839829468], tup0.s);
{
TY532811 LOC37;
/* Anonymous tuple type (kind 18): fields addressed by index. */
if (!((*t0).kind == ((Ttypekind292244) 18))) goto LA35;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468((&tup0));
LOC37[1] = rope_178401_2381377266(((NI64) (i_543846_839829468)));
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2);
}
goto LA33;
LA35: ;
{
TY532811 LOC43;
/* Named tuple: fields addressed by mangled field name. */
{
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kind == ((Tnodekind292020) 3)))) goto LA41;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = rdloc_538188_839829468((&tup0));
LOC43[1] = manglerecfieldname_534361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym, t0);
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2);
}
LA33: ;
putlocintodest_539258_839829468(p0, (&(*v0).loc), (&field0));
} LA21: ;
res_543917_839829468 += ((NI) 1);
} LA20: ;
}
}
}BeforeRet: ;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `genVarStmt`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Iterates the sons of a var section and dispatches each entry:
 * comment statements (kind 125) are skipped; identdefs (kind 35) go to the
 * single-var generator when the target is a plain symbol, otherwise to the
 * closure-var generator; everything else is treated as tuple unpacking.
 */
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	NI childIdx;
	NI childCount = sonslen_295351_850551059(n0);
	for (childIdx = ((NI) 0); childIdx <= (NI)(childCount - ((NI) 1)); childIdx += ((NI) 1)) {
		Tnode292802* child = (*n0).kindU.S6.sons->data[childIdx];
		if ((*child).kind == ((Tnodekind292020) 125)) {
			continue; /* comment statement: nothing to generate */
		}
		if ((*child).kind == ((Tnodekind292020) 35)) {
			if ((*(*child).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)) {
				gensinglevar_544276_839829468(p0, child);
			} else {
				genclosurevar_544832_839829468(p0, child);
			}
		} else {
			genvartuple_543794_839829468(p0, child);
		}
	}
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `emitLazily`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Returns true when a symbol's definition should be emitted
 * lazily: either the matching global option bit is set, or the symbol's
 * owning module carries the "lazy" flag (bit 25).  The module lookup is only
 * performed when the global option is off (short-circuit preserved).
 */
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0) {
	NIM_BOOL lazy = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0);
	if (!lazy) {
		Tsym292834* owningModule = getmodule_299123_2984716966(s0);
		lazy = (((*owningModule).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0);
	}
	return lazy;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genConstStmt`; inferred from the mangled name -- confirm against the Nim
 * compiler sources).  Walks a const section and requests C definitions for
 * those constants that need a runtime representation: compile-time-only
 * types are skipped, and only non-header-imported constants of certain type
 * kinds with a non-empty AST are emitted (unless lazy emission applies).
 */
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
NI i_544924_839829468;
NI HEX3Atmp_544975_839829468;
NI LOC2;
NI res_544978_839829468;
i_544924_839829468 = (NI)0;
HEX3Atmp_544975_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_544975_839829468 = (NI)(LOC2 - ((NI) 1));
res_544978_839829468 = ((NI) 0);
{
while (1) {
if (!(res_544978_839829468 <= HEX3Atmp_544975_839829468)) goto LA4;
i_544924_839829468 = res_544978_839829468;
{
Tnode292802* it0;
Tsym292834* c0;
it0 = (*t0).kindU.S6.sons->data[i_544924_839829468];
{
/* Comment statement (kind 125): skip. */
if (!((*it0).kind == ((Tnodekind292020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
/* Every other son must be a constdef node (kind 102). */
if (!!(((*it0).kind == ((Tnodekind292020) 102)))) goto LA12;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593));
}
LA12: ;
c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC16;
/* Types that exist only at compile time get no C definition. */
LOC16 = (NIM_BOOL)0;
LOC16 = containscompiletimeonly_328721_3876443242((*c0).typ);
if (!LOC16) goto LA17;
goto LA5;
}
goto LA14;
LA17: ;
{
NIM_BOOL LOC20;
NIM_BOOL LOC21;
NI LOC24;
/* 17629200 is a bitset over type kinds: only those kinds need a
   materialised C constant.  NOTE(review): which kinds exactly --
   confirm against the Ttypekind enum ordering. */
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = ((17629200 &((NU64)1<<((NU)((*(*c0).typ).kind)&63U)))!=0);
if (!(LOC21)) goto LA22;
LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC24 = (NI)0;
LOC24 = len_293081_850551059((*c0).ast);
LOC20 = !((LOC24 == ((NI) 0)));
LA23: ;
if (!LOC20) goto LA25;
{
NIM_BOOL LOC29;
/* Lazily-emitted constants are requested on first use instead. */
LOC29 = (NIM_BOOL)0;
LOC29 = emitlazily_532248_839829468(c0);
if (!!(LOC29)) goto LA30;
requestconstimpl_539240_839829468(p0, c0);
}
LA30: ;
}
goto LA14;
LA25: ;
LA14: ;
} LA5: ;
res_544978_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genCaseStringBranch`; inferred from the mangled name -- confirm against
 * the Nim compiler sources).  For one branch of a string case statement,
 * hashes each string literal and appends a "compare and jump to labl" test
 * into the hash bucket it falls in.  branches0Len0 must be a power of two:
 * the bucket index is hash & (len - 1).
 */
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0) {
Tloc292816 x0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
length0 = sonslen_295351_850551059(b0);
{
/* The last son is the branch body; only sons [0, length-2] are literals. */
NI i_547122_839829468;
NI HEX3Atmp_547410_839829468;
NI res_547413_839829468;
i_547122_839829468 = (NI)0;
HEX3Atmp_547410_839829468 = (NI)0;
HEX3Atmp_547410_839829468 = (NI)(length0 - ((NI) 2));
res_547413_839829468 = ((NI) 0);
{
while (1) {
NI j0;
NI64 LOC4;
TY535238 LOC5;
if (!(res_547413_839829468 <= HEX3Atmp_547410_839829468)) goto LA3;
i_547122_839829468 = res_547413_839829468;
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_547122_839829468], (&x0));
/* Bucket = hash(literal) masked to the power-of-two table size. */
LOC4 = (NI64)0;
LOC4 = hashstring_528100_2036603609((*(*b0).kindU.S6.sons->data[i_547122_839829468]).kindU.S3.strval);
j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1))))));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(e0);
LOC5[1] = rdloc_538188_839829468((&x0));
LOC5[2] = labl0;
appcg_532632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3);
res_547413_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc `exprBlock`;
 * inferred from the mangled name -- confirm against the Nim compiler
 * sources).  Wraps expression codegen in an anonymous C block: opens a
 * block, generates the expression into `d0`, and closes the block.  The
 * label id returned by startblock is intentionally unused here.
 */
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
	TY533289 emptyArgs;
	memset((void*)emptyArgs, 0, sizeof(emptyArgs));
	(void) startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), emptyArgs, 0);
	expr_539248_839829468(p0, n0, d0);
	endblock_544060_839829468(p0);
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genCaseSecondPass`; inferred from the mangled name -- confirm against the
 * Nim compiler sources).  Emits the branch bodies of a case statement:
 * for each branch i in [1, until0] it emits the label "labid0+i", then the
 * branch body (of-branches, kind 85, additionally jump to the end label;
 * else-branches fall through).  Returns the end label rope.
 */
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0) {
Ropeobj178006* result0;
Ropeobj178006* lend0;
result0 = (Ropeobj178006*)0;
lend0 = getlabel_539217_839829468(p0);
{
NI i_546984_839829468;
NI res_547017_839829468;
i_546984_839829468 = (NI)0;
res_547017_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC10;
if (!(res_547017_839829468 <= until0)) goto LA3;
i_546984_839829468 = res_547017_839829468;
{
NIM_BOOL LOC6;
/* Discard the destination when the case expression type is empty. */
LOC6 = (NIM_BOOL)0;
LOC6 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC6)) goto LA7;
LOC6 = isemptytype_297441_850551059((*t0).typ);
LA7: ;
if (!LOC6) goto LA8;
(*d0).k = ((Tlockind292808) 0);
}
LA8: ;
/* Emit the label for this branch: labid0 + i. */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rope_178401_2381377266(((NI64) ((NI)(labid0 + i_546984_839829468))));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1);
{
NI length0;
TY178507 LOC15;
/* of-branch (kind 85): body is the last son; jump to lend0 after. */
if (!((*(*t0).kindU.S6.sons->data[i_546984_839829468]).kind == ((Tnodekind292020) 85))) goto LA13;
length0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_546984_839829468]);
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1);
}
goto LA11;
LA13: ;
{
/* else-branch: body is son 0, no jump needed. */
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[((NI) 0)], d0);
}
LA11: ;
res_547017_839829468 += ((NI) 1);
} LA3: ;
}
}
result0 = lend0;
return result0;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genCaseGenericBranch`; inferred from the mangled name -- confirm against
 * the Nim compiler sources).  Emits the comparisons for one case branch:
 * range nodes (kind 44) use rangeformat0 with lower/upper bounds, plain
 * values use eqformat0; each successful comparison jumps to labl0.
 */
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0) {
Tloc292816 x0;
Tloc292816 y0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
length0 = sonslen_295351_850551059(b0);
{
/* The last son is the branch body; only sons [0, length-2] are values. */
NI i_546932_839829468;
NI HEX3Atmp_546958_839829468;
NI res_546961_839829468;
i_546932_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)(length0 - ((NI) 2));
res_546961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA3;
i_546932_839829468 = res_546961_839829468;
{
TY535238 LOC8;
/* Range check: "lower <= e && e <= upper" via rangeformat0. */
if (!((*(*b0).kindU.S6.sons->data[i_546932_839829468]).kind == ((Tnodekind292020) 44))) goto LA6;
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdcharloc_538227_839829468(e0);
LOC8[1] = rdcharloc_538227_839829468((&x0));
LOC8[2] = rdcharloc_538227_839829468((&y0));
LOC8[3] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), rangeformat0, LOC8, 4);
}
goto LA4;
LA6: ;
{
TY535238 LOC10;
/* Single value: equality comparison via eqformat0. */
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_546932_839829468], (&x0));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdcharloc_538227_839829468(e0);
LOC10[1] = rdcharloc_538227_839829468((&x0));
LOC10[2] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), eqformat0, LOC10, 3);
}
LA4: ;
res_546961_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genIfForCaseUntil`; inferred from the mangled name -- confirm against the
 * Nim compiler sources).  Generates the dispatch (first) pass of a generic
 * case statement: for branches 1..until0, comparisons that jump to fresh
 * per-branch labels (of-branches, kind 85) or an unconditional jump
 * (else-branch).  If more branches remain past until0, the emitted bodies
 * are bracketed with an extra goto/label pair so control resumes correctly.
 * Returns the end label produced by the second pass.
 */
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0) {
Ropeobj178006* result0;
NI labid0;
result0 = (Ropeobj178006*)0;
/* Remember the label counter; branch i's label is labid0 + i. */
labid0 = (*p0).labels;
{
NI i_547042_839829468;
NI res_547083_839829468;
i_547042_839829468 = (NI)0;
res_547083_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547083_839829468 <= until0)) goto LA3;
i_547042_839829468 = res_547083_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
/* of-branch: emit the comparisons jumping to this branch's label. */
if (!((*(*t0).kindU.S6.sons->data[i_547042_839829468]).kind == ((Tnodekind292020) 85))) goto LA6;
LOC8 = (Ropeobj178006*)0;
LOC8 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC8);
gencasegenericbranch_546910_839829468(p0, (*t0).kindU.S6.sons->data[i_547042_839829468], a0, rangeformat0, eqformat0, LOC9);
}
goto LA4;
LA6: ;
{
TY178507 LOC11;
/* else-branch: unconditional jump to its label. */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1);
}
LA4: ;
res_547083_839829468 += ((NI) 1);
} LA3: ;
}
}
{
NI LOC14;
NI gototarget0;
TY178507 LOC17;
TY178507 LOC18;
/* More branches remain: skip over the emitted bodies with a goto. */
LOC14 = (NI)0;
LOC14 = len_293081_850551059(t0);
if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15;
(*p0).labels += ((NI) 1);
gototarget0 = (*p0).labels;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1);
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1);
}
goto LA12;
LA15: ;
{
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
}
LA12: ;
return result0;
}
/*
 * Machine-generated Nim C backend output (likely the Nim proc
 * `genCaseGeneric`; inferred from the mangled name -- confirm against the
 * Nim compiler sources).  Generates a whole generic case statement:
 * evaluates the selector (son 0) once, runs the if-chain dispatch over all
 * branches, then fixes up the end label it returns.
 */
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) {
	Tloc292816 selector;
	NI branchCount;
	Ropeobj178006* endLabel;
	memset((void*)(&selector), 0, sizeof(selector));
	initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&selector));
	branchCount = sonslen_295351_850551059(t0);
	endLabel = genifforcaseuntil_547021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(branchCount - ((NI) 1)), (&selector));
	fixlabel_539230_839829468(p0, endLabel);
}
/*
 * Emits C code for a Nim `case` statement whose selector is a string
 * (machine-generated backend code; appears to come from the Nim compiler's
 * C code generator — do not hand-edit in a real build, it is regenerated).
 *
 * Strategy: first count the string literals across all of-branches
 * (node kind 85). If there are more than 8, emit a hash-table style
 * switch whose size is the next power of two (bitmask dispatch);
 * otherwise fall back to a generic if/else comparison chain.
 */
N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NI strings0;
strings0 = ((NI) 0);
/* Pass 1: count string keys in every of-branch (each of-branch has
   sonslen-1 keys; the last son is the branch body). */
{
NI i_547435_839829468;
NI HEX3Atmp_547550_839829468;
NI LOC2;
NI res_547553_839829468;
i_547435_839829468 = (NI)0;
HEX3Atmp_547550_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_547550_839829468 = (NI)(LOC2 - ((NI) 1));
res_547553_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547553_839829468 <= HEX3Atmp_547550_839829468)) goto LA4;
i_547435_839829468 = res_547553_839829468;
{
NI LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_547435_839829468]).kind == ((Tnodekind292020) 85))) goto LA7;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_547435_839829468]);
strings0 += (NI)(LOC9 - ((NI) 1));
}
LA7: ;
res_547553_839829468 += ((NI) 1);
} LA4: ;
}
}
/* Hash-table path: taken when strings0 > 8. */
{
NI bitmask0;
NI LOC14;
TY191350* branches0;
Tloc292816 a0;
NI labid0;
TY532811 LOC26;
TY533289 LOC35;
Ropeobj178006* lend0;
NI LOC42;
if (!(((NI) 8) < strings0)) goto LA12;
/* bitmask = nextPowerOfTwo(strings0) - 1; the branch table has
   bitmask+1 buckets. */
LOC14 = (NI)0;
LOC14 = nextpoweroftwo_101629_1009420244(strings0);
bitmask0 = (NI)(LOC14 - ((NI) 1));
branches0 = (TY191350*)0;
branches0 = (TY191350*) newSeq((&NTI191350), ((NI) ((NI)(bitmask0 + ((NI) 1)))));
memset((void*)(&a0), 0, sizeof(a0));
/* Evaluate the selector expression (son 0) into loc a0. */
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
labid0 = (*p0).labels;
/* Pass 2: allocate one fresh label per branch and distribute each
   of-branch's string keys into the branches0 buckets. */
{
NI i_547484_839829468;
NI HEX3Atmp_547560_839829468;
NI LOC16;
NI res_547563_839829468;
i_547484_839829468 = (NI)0;
HEX3Atmp_547560_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(t0);
HEX3Atmp_547560_839829468 = (NI)(LOC16 - ((NI) 1));
res_547563_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547563_839829468 <= HEX3Atmp_547560_839829468)) goto LA18;
i_547484_839829468 = res_547563_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
if (!((*(*t0).kindU.S6.sons->data[i_547484_839829468]).kind == ((Tnodekind292020) 85))) goto LA21;
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC23);
gencasestringbranch_547100_839829468(p0, (*t0).kindU.S6.sons->data[i_547484_839829468], (&a0), LOC24, branches0->data, branches0->Sup.len);
}
goto LA19;
LA21: ;
{
}
LA19: ;
res_547563_839829468 += ((NI) 1);
} LA18: ;
}
}
/* Emit the switch header (template T839829468_596) with the selector
   loc and the bitmask. */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468((&a0));
LOC26[1] = rope_178401_2381377266(((NI64) (bitmask0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2);
/* Emit one `case j:` arm per non-empty bucket (template T839829468_597). */
{
NI j_547518_839829468;
NI HEX3Atmp_547568_839829468;
NI res_547571_839829468;
j_547518_839829468 = (NI)0;
HEX3Atmp_547568_839829468 = (NI)0;
HEX3Atmp_547568_839829468 = (branches0 ? (branches0->Sup.len-1) : -1);
res_547571_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547571_839829468 <= HEX3Atmp_547568_839829468)) goto LA29;
j_547518_839829468 = res_547571_839829468;
{
TY532811 LOC34;
if (!!((branches0->data[j_547518_839829468] == NIM_NIL))) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = intliteral_539270_839829468(((NI64) (j_547518_839829468)));
LOC34[1] = branches0->data[j_547518_839829468];
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2);
}
LA32: ;
res_547571_839829468 += ((NI) 1);
} LA29: ;
}
}
/* Close the switch (template T839829468_160). */
memset((void*)LOC35, 0, sizeof(LOC35));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0);
/* If the last branch is NOT an of-branch (i.e. there is an else part),
   emit the label for it (template T839829468_598). */
{
NI LOC38;
TY178507 LOC41;
LOC38 = (NI)0;
LOC38 = sonslen_295351_850551059(t0);
if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind292020) 85)))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1);
}
LA39: ;
/* Second pass emits the actual branch bodies starting at labid0;
   lend0 is the label marking the end of the whole case. */
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059(t0);
lend0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1)));
fixlabel_539230_839829468(p0, lend0);
}
goto LA10;
LA12: ;
/* Few strings: generic if/else comparison chain. */
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595));
}
LA10: ;
}
/*
 * Emits the dispatch labels for a `case` used as a computed-goto target
 * (the sibling gencase routes here when the selector symbol carries
 * symbol-flag 30). For each branch: open a block, emit one label per
 * ordinal key value (template T839829468_602), then emit the branch body.
 * Range keys (node kind 44) are not supported and raise a local error.
 */
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0) {
{ {
NI i_545695_839829468;
NI HEX3Atmp_545737_839829468;
NI LOC2;
NI res_545740_839829468;
i_545695_839829468 = (NI)0;
HEX3Atmp_545737_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(casestmt0);
HEX3Atmp_545737_839829468 = (LOC2 - 1);
res_545740_839829468 = ((NI) 1);
{
/* Iterate branches 1 .. len-1 (son 0 is the selector). */
while (1) {
TY533289 LOC5;
NI LOC6;
Tnode292802* it0;
Tnode292802* LOC16;
if (!(res_545740_839829468 <= HEX3Atmp_545737_839829468)) goto LA4;
i_545695_839829468 = res_545740_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NI)0;
LOC6 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545695_839829468];
{
NI j_545711_839829468;
NI HEX3Atmp_545730_839829468;
NI LOC8;
NI res_545733_839829468;
j_545711_839829468 = (NI)0;
HEX3Atmp_545730_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(it0);
HEX3Atmp_545730_839829468 = (NI)(LOC8 - ((NI) 2));
res_545733_839829468 = ((NI) 0);
{
/* Emit a label for each key value of this branch (keys are
   sons 0 .. len-2; the last son is the body). */
while (1) {
NI64 val0;
TY178507 LOC15;
if (!(res_545733_839829468 <= HEX3Atmp_545730_839829468)) goto LA10;
j_545711_839829468 = res_545733_839829468;
{
/* A range key (node kind 44) cannot be turned into a single
   label: report and bail out of the whole routine. */
if (!((*(*it0).kindU.S6.sons->data[j_545711_839829468]).kind == ((Tnodekind292020) 44))) goto LA13;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA13: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545711_839829468]);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rope_178401_2381377266(val0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1);
res_545733_839829468 += ((NI) 1);
} LA10: ;
}
}
/* Branch body = last son. */
LOC16 = (Tnode292802*)0;
LOC16 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC16);
endblock_544060_839829468(p0);
res_545740_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
}
/*
 * Returns true when any key of branch b0 is a range node (kind 44)
 * spanning more than 256 ordinal values (high - low > 256).
 * Keys are sons 0 .. sonslen-2; the last son is the branch body.
 */
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0) {
	NI lastKey = (NI)(sonslen_295351_850551059(b0) - ((NI) 2));
	NI idx;
	for (idx = ((NI) 0); idx <= lastKey; idx += ((NI) 1)) {
		Tnode292802* key = (*b0).kindU.S6.sons->data[idx];
		if ((*key).kind == ((Tnodekind292020) 44)) {
			/* span = high(range) - low(range); sons 1 and 0 respectively */
			NI64 span = (NI64)((*(*key).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*key).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval);
			if (IL64(256) < span) {
				return NIM_TRUE;
			}
		}
	}
	return NIM_FALSE;
}
/*
 * Determines where a `case` must be split between an if/else chain and a
 * C `switch`. Returns the index of the LAST branch that cannot be part of
 * the switch (0 when the whole case can be a switch). A branch disqualifies
 * itself when its statement block contains a specific pragma (special word
 * 181), or — for C compilers lacking cc capability bit 0, presumably GNU
 * case-range support (TODO confirm) — when it is an of-branch (kind 85)
 * with a range wider than 256 values (branchhastoobigrange).
 */
N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI result0;
result0 = (NI)0;
{
NI i_547631_839829468;
NI HEX3Atmp_547655_839829468;
NI LOC2;
NI res_547658_839829468;
i_547631_839829468 = (NI)0;
HEX3Atmp_547655_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_547655_839829468 = (NI)(LOC2 - ((NI) 1));
res_547658_839829468 = ((NI) 1);
{
/* Scan branches 1 .. len-1; result keeps the highest offending index. */
while (1) {
Tnode292802* branch0;
Tnode292802* stmtblock0;
if (!(res_547658_839829468 <= HEX3Atmp_547655_839829468)) goto LA4;
i_547631_839829468 = res_547658_839829468;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547631_839829468);
stmtblock0 = lastson_295364_850551059(branch0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = stmtscontainpragma_528083_2036603609(stmtblock0, ((Tspecialword275003) 181));
if (!LOC7) goto LA8;
result0 = i_547631_839829468;
}
goto LA5;
LA8: ;
{
/* Else-branch of the check: only reached when the compiler lacks
   capability bit 0. */
if (!!(((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*branch0).kind == ((Tnodekind292020) 85));
if (!(LOC15)) goto LA16;
LOC15 = branchhastoobigrange_547576_839829468(branch0);
LA16: ;
if (!LOC15) goto LA17;
result0 = i_547631_839829468;
}
LA17: ;
}
goto LA5;
LA11: ;
LA5: ;
res_547658_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
/*
 * Emits C code for a `case` over an ordinal selector. Branches up to the
 * split point (see ifswitchsplitpoint) are emitted as an if/else chain via
 * genifforcaseuntil; the remaining branches become a C `switch` on the
 * selector. Finally, the end label (lend0) produced by the if-chain, if
 * any, is fixed up.
 */
N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI splitpoint0;
Tloc292816 a0;
Ropeobj178006* lend0;
splitpoint0 = ifswitchsplitpoint_547616_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
/* Evaluate the selector (son 0) into loc a0. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
/* If there is a prefix of if/else branches, emit them now. */
if (!(((NI) 0) < splitpoint0)) goto LA3;
lend0 = genifforcaseuntil_547021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, (&a0));
}
goto LA1;
LA3: ;
{
lend0 = NIM_NIL;
}
LA1: ;
{
NI LOC8;
TY178507 LOC11;
NIM_BOOL hasdefault0;
TY533289 LOC37;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
/* Only emit a switch when branches remain past the split point. */
if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9;
/* `switch (<selector>)` header (template T839829468_603). */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdcharloc_538227_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1);
hasdefault0 = NIM_FALSE;
{
NI i_547758_839829468;
NI HEX3Atmp_547817_839829468;
NI HEX3Atmp_547818_839829468;
NI LOC13;
NI res_547821_839829468;
i_547758_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)0;
HEX3Atmp_547818_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)(splitpoint0 + ((NI) 1));
LOC13 = (NI)0;
LOC13 = len_293081_850551059(n0);
HEX3Atmp_547818_839829468 = (LOC13 - 1);
res_547821_839829468 = HEX3Atmp_547817_839829468;
{
/* Emit each remaining branch as case labels + body + break. */
while (1) {
Tnode292802* branch0;
Tnode292802* LOC28;
TY533289 LOC29;
if (!(res_547821_839829468 <= HEX3Atmp_547818_839829468)) goto LA15;
i_547758_839829468 = res_547821_839829468;
{
/* Discard the destination when the case's type is empty and the
   destination loc is still unassigned (k == 1). */
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC18)) goto LA19;
LOC18 = isemptytype_297441_850551059((*n0).typ);
LA19: ;
if (!LOC18) goto LA20;
(*d0).k = ((Tlockind292808) 0);
}
LA20: ;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547758_839829468);
{
/* Of-branch (kind 85): emit `case lo ... hi:` labels. */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA24;
gencaserange_537028_839829468(p0, branch0);
}
goto LA22;
LA24: ;
{
/* Else-branch: emit `default:` (template T839829468_181). */
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0);
hasdefault0 = NIM_TRUE;
}
LA22: ;
LOC28 = (Tnode292802*)0;
LOC28 = lastson_295364_850551059(branch0);
exprblock_544103_839829468(p0, LOC28, d0);
/* Close the arm (template T839829468_182, presumably `break;`). */
memset((void*)LOC29, 0, sizeof(LOC29));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0);
res_547821_839829468 += ((NI) 1);
} LA15: ;
}
}
{
/* For compilers with capability bit 3 and no default branch, emit an
   extra default (template T839829468_604) — NOTE(review): presumably
   to silence exhaustive-switch warnings; confirm against the
   generator source. */
NIM_BOOL LOC32;
TY533289 LOC36;
LOC32 = (NIM_BOOL)0;
LOC32 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 3))&7U)))!=0);
if (!(LOC32)) goto LA33;
LOC32 = !(hasdefault0);
LA33: ;
if (!LOC32) goto LA34;
memset((void*)LOC36, 0, sizeof(LOC36));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0);
}
LA34: ;
/* Close the switch (template T839829468_160). */
memset((void*)LOC37, 0, sizeof(LOC37));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0);
}
LA9: ;
{
/* Fix the end label emitted by the if/else prefix, when present. */
if (!!((lend0 == NIM_NIL))) goto LA40;
fixlabel_539230_839829468(p0, lend0);
}
LA40: ;
}
/*
 * Top-level dispatcher for `case` statement code generation.
 * Allocates a destination temp when the case has a non-empty type and no
 * destination yet, then dispatches on the selector's (skipped) type kind:
 *   kind 28            -> string case (genstringcase)
 *   kinds 36..39       -> generic if/else chain (gencasegeneric)
 *   otherwise          -> computed-goto case when the selector symbol has
 *                         flag 30 set (gengotoforcase), else ordinal case.
 */
N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ttype292840* LOC8;
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Skip abstract type wrappers on the selector type before dispatch. */
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
switch ((*LOC8).kind) {
case ((Ttypekind292244) 28):
{
genstringcase_547417_839829468(p0, t0, d0);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601));
}
break;
default:
{
{
/* Selector is a symbol (kind 3) with flag 30 -> goto-based case. */
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC14)) goto LA15;
LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA15: ;
if (!LOC14) goto LA16;
gengotoforcase_545673_839829468(p0, t0);
}
goto LA12;
LA16: ;
{
genordinalcase_547725_839829468(p0, t0, d0);
}
LA12: ;
}
break;
}
}
/*
 * Removes and returns the last element of the node sequence *s0,
 * shrinking the sequence in place via setLengthSeq.
 */
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0) {
	NI shrunkLen = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
	Tnode292802* tail = (*s0)->data[shrunkLen];
	(*s0) = (Tnodeseq292796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode292802*), ((NI) (shrunkLen)));
	return tail;
}
/*
 * Emits the cleanup code needed when control leaves `howmanytrys0` nested
 * try statements and `howmanyexcepts0` except blocks (used by return/break
 * codegen). For each try: emit a safepoint-pop (template T839829468_605)
 * unless it was already popped by an enclosing except block or exception
 * tracking is disabled (gcmd == 2 or module flag 27), pop the try node off
 * p0->nestedtrystmts, and generate its finally section (kind 107) if any.
 * The popped try nodes are kept on a local stack and pushed back afterwards
 * so that p0->nestedtrystmts is left unchanged. Finally, one except-leave
 * statement (template T839829468_606) is emitted per open except block.
 */
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0) {
Tnodeseq292796* stack0;
NI alreadypoppedcnt0;
stack0 = (Tnodeseq292796*)0;
stack0 = (Tnodeseq292796*) newSeq((&NTI292796), ((NI) 0));
alreadypoppedcnt0 = (*p0).inexceptblock;
{
NI i_545471_839829468;
NI res_545596_839829468;
i_545471_839829468 = (NI)0;
res_545596_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* trystmt0;
Tnode292802* finallystmt0;
if (!(res_545596_839829468 <= howmanytrys0)) goto LA3;
i_545471_839829468 = res_545596_839829468;
{
/* Skip the safepoint pop entirely when exception tracking is off. */
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!!(LOC6)) goto LA8;
{
/* An enclosing except block already popped this safepoint. */
if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12;
alreadypoppedcnt0 -= ((NI) 1);
}
goto LA10;
LA12: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0);
}
LA10: ;
}
LA8: ;
/* Pop the try, remember it, and emit its finally part (kind 107). */
trystmt0 = pop_318246_1689653243((&(*p0).nestedtrystmts));
stack0 = (Tnodeseq292796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0);
++stack0->Sup.len;
finallystmt0 = lastson_295364_850551059(trystmt0);
{
if (!((*finallystmt0).kind == ((Tnodekind292020) 107))) goto LA18;
genstmts_539244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]);
}
LA18: ;
res_545596_839829468 += ((NI) 1);
} LA3: ;
}
}
/* Push the popped try nodes back in reverse order, restoring the
   original nestedtrystmts stack. */
{
NI i_545546_839829468;
NI HEX3Atmp_545601_839829468;
NI res_545604_839829468;
i_545546_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)(howmanytrys0 - ((NI) 1));
res_545604_839829468 = HEX3Atmp_545601_839829468;
{
while (1) {
if (!(((NI) 0) <= res_545604_839829468)) goto LA22;
i_545546_839829468 = res_545604_839829468;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_545546_839829468]);
++(*p0).nestedtrystmts->Sup.len;
res_545604_839829468 -= ((NI) 1);
} LA22: ;
}
}
{
/* One except-leave statement per open except block, again only when
   exception tracking is enabled. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC25) goto LA26;
LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA26: ;
if (!!(LOC25)) goto LA27;
{
NI i_545587_839829468;
NI HEX3Atmp_545610_839829468;
NI res_545613_839829468;
i_545587_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1));
res_545613_839829468 = HEX3Atmp_545610_839829468;
{
while (1) {
TY533289 LOC32;
if (!(((NI) 0) <= res_545613_839829468)) goto LA31;
i_545587_839829468 = res_545613_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0);
res_545613_839829468 -= ((NI) 1);
} LA31: ;
}
}
}
LA27: ;
}
/*
 * Emits C code for a `return` statement. Skips generation entirely when
 * the node carries flag 14. Otherwise: marks that the proc needs a
 * BeforeRet label, generates the return expression (son 0) unless empty
 * (kind 1), runs all block-leave actions (finally sections, safepoint
 * pops), emits a safepoint restore (template T839829468_607) when inside
 * a finally with an active safepoint, and finally emits the jump to the
 * return label (template T839829468_608).
 */
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0) {
TY533289 LOC14;
{ {
if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
(*p0).beforeretneeded = NIM_TRUE;
genlinedir_532823_839829468(p0, t0);
{
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA7;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
/* Leave every open try and except block before jumping out. */
blockleaveactions_545442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock);
{
/* Inside a finally clause: re-raise handling via the innermost
   finally safepoint (template T839829468_607). */
Ropeobj178006* safepoint0;
TY178507 LOC13;
if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11;
safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))];
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1);
}
LA11: ;
memset((void*)LOC14, 0, sizeof(LOC14));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0);
}BeforeRet: ;
}
/*
 * Emits C code for a `break` statement. If the break names a block
 * (son 0 is a symbol), the target index comes from that symbol's
 * position; otherwise the nearest enclosing loop block is found by
 * scanning p0->blocks downwards (internal error T839829468_609 when
 * none exists). Block-leave actions are emitted for every try/except
 * opened since the target block, then a `goto <label>` (template
 * T839829468_556).
 */
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI idx0;
Ropeobj178006* label0;
TY178507 LOC16;
idx0 = (*p0).breakidx;
{
/* Named break: target block index is sym.position - 1. */
Tsym292834* sym0;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA3;
sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
idx0 = (NI)((*sym0).position - ((NI) 1));
}
goto LA1;
LA3: ;
{
{
/* Unnamed break: walk outwards to the nearest loop block. */
while (1) {
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (((NI) 0) <= idx0);
if (!(LOC8)) goto LA9;
LOC8 = !((*p0).blocks->data[idx0].isloop);
LA9: ;
if (!LOC8) goto LA7;
idx0 -= ((NI) 1);
} LA7: ;
}
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (idx0 < ((NI) 0));
if (LOC12) goto LA13;
LOC12 = !((*p0).blocks->data[idx0].isloop);
LA13: ;
if (!LOC12) goto LA14;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609));
}
LA14: ;
}
LA1: ;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[idx0]));
/* Leave only the trys/excepts opened since the target block. */
blockleaveactions_545442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts))));
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1);
}
/*
 * Returns true when the assignment asgn0 writes to an object-variant
 * discriminator field (symbol flag 18 on the field symbol) and field
 * checks are enabled (option bit 2 on the proc). The LHS may be either
 * a checked field access (kind 46, field symbol nested one level deeper)
 * or a plain dot expression (kind 45).
 */
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0) {
	NIM_BOOL needed = NIM_FALSE;
	if ((((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) {
		Tnode292802* lhs = (*asgn0).kindU.S6.sons->data[((NI) 0)];
		if ((*lhs).kind == ((Tnodekind292020) 46)) {
			/* checked field access: field symbol is son[0].son[1] */
			Tsym292834* fld = (*(*(*lhs).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
			needed = (((*fld).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
		} else if ((*lhs).kind == ((Tnodekind292020) 45)) {
			/* plain dot expression: field symbol is son[1] */
			Tsym292834* fld = (*(*lhs).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
			needed = (((*fld).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
		}
	}
	return needed;
}
/*
 * Builds the C declaration (as a rope) for the discriminator-check table
 * of variant field d0 in objtype0. The table name comes from
 * discriminatortablename and its size is lengthord(d0.typ) + 1 entries;
 * the declaration text is template T839829468_203. Also forces the
 * compilerproc referenced by T839829468_130 to be emitted via cgsym.
 */
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ropeobj178006* tmp0;
TY532811 LOC2;
NI64 LOC3;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
tmp0 = discriminatortablename_536057_839829468(m0, objtype0, d0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = tmp0;
LOC3 = (NI64)0;
LOC3 = lengthord_320007_3876443242((*d0).typ);
/* Table holds one slot per ordinal value of the discriminator, plus one. */
LOC2[1] = rope_178401_2381377266((NI64)(LOC3 + IL64(1)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2);
return result0;
}
/*
 * Emits the runtime check for assigning a new value to an object-variant
 * discriminator field. Declares the per-type discriminator table once per
 * module (guarded by declaredthings / the field's symbol id), then emits
 * a check call (template T839829468_611) with: the target loc a0, the new
 * value tmp0, the table name, and lengthord(field.typ)+1.
 */
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI64 L0;
TY535235 LOC8;
t0 = skiptypes_296099_850551059(objtype0, IL64(211106240964864));
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
L0 = lengthord_320007_3876443242((*field0).typ);
{
/* Emit the table declaration only on first use of this field. */
NIM_BOOL LOC4;
TY178507 LOC7;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id);
if (!!(LOC4)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = discriminatortabledecl_536094_839829468((*p0).module, t0, field0);
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1);
}
LA5: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = rdloc_538188_839829468(tmp0);
LOC8[2] = discriminatortablename_536057_839829468((*p0).module, t0, field0);
LOC8[3] = intliteral_539270_839829468((NI64)(L0 + IL64(1)));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4);
}
/*
 * Generates an assignment to an object-variant discriminator field:
 * evaluates the LHS into a0, evaluates the RHS into a fresh temp tmp0,
 * emits the discriminator check (gendiscriminantcheck), and only then
 * performs the actual assignment a0 = tmp0. Unwraps a checked-field
 * node (kind 46) to reach the underlying dot expression first.
 */
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 tmp0;
Tnode292802* dotexpr0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)];
{
if (!((*dotexpr0).kind == ((Tnodekind292020) 46))) goto LA3;
dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
gettemp_537032_839829468(p0, a0.t, (&tmp0), NIM_FALSE);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
/* Check against the object (dotexpr son 0) and field sym (son 1). */
gendiscriminantcheck_549144_839829468(p0, (&a0), (&tmp0), (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym);
genassignment_539264_839829468(p0, (&a0), (&tmp0), 0);
}
/*
 * Generates an assignment `lhs = rhs`. Three cases:
 *  1. LHS is a symbol with flag 30 (computed-goto variable): emit a goto
 *     to the RHS value via gengotovar.
 *  2. LHS is an ordinary location: evaluate it (dereferencing explicitly
 *     for node kinds 47/65), optionally mark the loc with flag 2 when
 *     fastasgn0 (shallow/fast assignment), then loadinto the RHS.
 *  3. LHS is a variant discriminator needing a check: delegate to
 *     asgnfielddiscriminant.
 */
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0) {
genlinedir_532823_839829468(p0, e0);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC3)) goto LA4;
LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
gengotovar_544258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
Tloc292816 a0;
LOC8 = (NIM_BOOL)0;
LOC8 = fielddiscriminantcheckneeded_549080_839829468(p0, e0);
if (!!(LOC8)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
{
/* Deref nodes (kinds 47/65) on the LHS get an explicit genderef. */
Tnode292802* LOC13;
Tnode292802* LOC16;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
if (!((*LOC13).kind == ((Tnodekind292020) 47) || (*LOC13).kind == ((Tnodekind292020) 65))) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
genderef_543921_839829468(p0, LOC16, (&a0), NIM_TRUE);
}
goto LA11;
LA14: ;
{
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA11: ;
{
/* fastasgn0 marks the destination loc with flag 2. */
if (!fastasgn0) goto LA20;
a0.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA20: ;
loadinto_543928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
}
goto LA1;
LA9: ;
{
asgnfielddiscriminant_549209_839829468(p0, e0);
}
LA1: ;
}
/*
 * Builds the output text (as a rope) for an `asm`/`emit` statement node.
 * Each son of t0 is either a string literal (kinds 20..22), appended
 * verbatim, or a symbol (kind 3), which is rendered as: its C location
 * for var-like symbols (kind mask 28672), its C type descriptor for type
 * symbols (kind 7), or its (possibly freshly mangled) C name otherwise.
 * Any other node kind is an internal error (T839829468_612).
 *
 * When isasmstmt0 and the C compiler has capability bit 5, the collected
 * text is re-emitted line by line: lines already starting with '"' or ':'
 * are kept as-is, other non-empty lines are wrapped between templates
 * T839829468_613 / T839829468_614 — presumably per-line quoting for the
 * compiler's asm syntax (TODO confirm against the generator). Otherwise
 * the text plus a trailing newline is returned as one rope.
 */
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0) {
Ropeobj178006* result0;
NimStringDesc* res0;
result0 = (Ropeobj178006*)0;
res0 = copyString(((NimStringDesc*) &T839829468_490));
{
NI i_548547_839829468;
NI HEX3Atmp_548644_839829468;
NI LOC2;
NI res_548647_839829468;
i_548547_839829468 = (NI)0;
HEX3Atmp_548644_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_548644_839829468 = (NI)(LOC2 - ((NI) 1));
res_548647_839829468 = ((NI) 0);
{
/* Concatenate all pieces into res0. */
while (1) {
if (!(res_548647_839829468 <= HEX3Atmp_548644_839829468)) goto LA4;
i_548547_839829468 = res_548647_839829468;
switch ((*(*t0).kindU.S6.sons->data[i_548547_839829468]).kind) {
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* String literal: append verbatim. */
res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval->Sup.len + 0);
appendString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval);
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S4.sym;
{
/* Var-like symbol: render its C location. */
Tloc292816 a0;
Ropeobj178006* LOC11;
NimStringDesc* LOC12;
if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[i_548547_839829468], (&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = rdloc_538188_839829468((&a0));
LOC12 = (NimStringDesc*)0;
LOC12 = HEX24_178856_2381377266(LOC11);
res0 = resizeString(res0, LOC12->Sup.len + 0);
appendString(res0, LOC12);
}
goto LA7;
LA9: ;
{
/* Type symbol (kind 7): render its C type descriptor. */
Ropeobj178006* LOC16;
NimStringDesc* LOC17;
if (!((*sym0).kind == ((Tsymkind292435) 7))) goto LA14;
LOC16 = (Ropeobj178006*)0;
LOC16 = gettypedesc_535673_839829468((*p0).module, (*sym0).typ);
LOC17 = (NimStringDesc*)0;
LOC17 = HEX24_178856_2381377266(LOC16);
res0 = resizeString(res0, LOC17->Sup.len + 0);
appendString(res0, LOC17);
}
goto LA7;
LA14: ;
{
/* Anything else: use the symbol's C name, mangling on demand. */
Ropeobj178006* r0;
NimStringDesc* LOC23;
r0 = (*sym0).loc.r;
{
if (!(r0 == NIM_NIL)) goto LA21;
r0 = manglename_533205_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), r0);
}
LA21: ;
LOC23 = (NimStringDesc*)0;
LOC23 = HEX24_178856_2381377266(r0);
res0 = resizeString(res0, LOC23->Sup.len + 0);
appendString(res0, LOC23);
}
LA7: ;
}
break;
default:
{
internalerror_196100_155036129((*(*t0).kindU.S6.sons->data[i_548547_839829468]).info, ((NimStringDesc*) &T839829468_612));
}
break;
}
res_548647_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = isasmstmt0;
if (!(LOC27)) goto LA28;
LOC27 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 5))&7U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
{
/* Line-by-line re-emission: split res0 at NUL/CR/LF boundaries. */
NimStringDesc* x_548604_839829468;
NI first_548656_839829468;
NI last_548658_839829468;
x_548604_839829468 = (NimStringDesc*)0;
first_548656_839829468 = ((NI) 0);
last_548658_839829468 = ((NI) 0);
{
while (1) {
NI j0;
{
/* Advance `last` to the next NUL (0), CR (13) or LF (10). */
while (1) {
if (!!((((NU8)(res0->data[last_548658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(10))))) goto LA35;
last_548658_839829468 += ((NI) 1);
} LA35: ;
}
x_548604_839829468 = copyStrLast(res0, first_548656_839829468, (NI)(last_548658_839829468 - ((NI) 1)));
j0 = ((NI) 0);
{
/* Skip leading spaces (32) and tabs (9). */
while (1) {
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(9)))) goto LA37;
j0 += ((NI) 1);
} LA37: ;
}
{
/* Lines starting with '"' (34) or ':' (58) pass through unchanged. */
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(58)))) goto LA40;
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, tnl_176644_4151366050);
}
goto LA38;
LA40: ;
{
/* Non-empty lines get wrapped in templates _613 / _614. */
if (!!(((NU8)(x_548604_839829468->data[j0]) == (NU8)(0)))) goto LA43;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_613));
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_614));
}
goto LA38;
LA43: ;
LA38: ;
{
/* Consume the line terminator: LF, CRLF, or CR; NUL ends the loop. */
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA47;
last_548658_839829468 += ((NI) 1);
}
goto LA45;
LA47: ;
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(13))) goto LA50;
last_548658_839829468 += ((NI) 1);
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA54;
last_548658_839829468 += ((NI) 1);
}
LA54: ;
}
goto LA45;
LA50: ;
{
goto LA32;
}
LA45: ;
first_548656_839829468 = last_548658_839829468;
}
} LA32: ;
}
}
goto LA25;
LA29: ;
{
/* Simple case: whole text plus trailing newline as a single rope. */
res0 = resizeString(res0, tnl_176644_4151366050->Sup.len + 0);
appendString(res0, tnl_176644_4151366050);
result0 = rope_178277_2381377266(res0);
}
LA25: ;
return result0;
}
/*
 * Emits an `asm` statement. The text is produced by genasmoremitstmt and
 * then wrapped in the C compiler's asm format string (Cc[...].Field17).
 * At top level (p0->prc == nil) the result goes into module file section
 * 7; inside a proc it goes into the proc body (section 2).
 */
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
genlinedir_532823_839829468(p0, t0);
s0 = genasmoremitstmt_548529_839829468(p0, t0, NIM_TRUE);
{
TY178507 LOC5;
if (!((*p0).prc == NIM_NIL)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = s0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 7))- 0], Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC5, 1);
}
goto LA1;
LA3: ;
{
TY178507 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = s0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC7, 1);
}
LA1: ;
}
/*
 * Emits stmts0 wrapped in an anonymous block: open a block with an empty
 * argument list (template T839829468_273), generate the statements, close.
 */
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0) {
	TY533289 noArgs;
	memset((void*)noArgs, 0, sizeof(noArgs));
	/* startblock's returned block index is not needed here */
	(void) startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), noArgs, 0);
	genstmts_539244_839829468(p0, stmts0);
	endblock_544060_839829468(p0);
}
/*
 * Emits a `try` statement using the C++-style exception scheme:
 *   try { <body> } catch (<exc>) { <except branches> } [finally]
 * Steps: allocate a destination temp when needed; force the compilerproc
 * named by T839829468_615 (falling back to T839829468_616) to be emitted;
 * push t0 onto p0->nestedtrystmts; open the try block (T839829468_617)
 * and generate the body; close it with a `catch` header built from
 * template T839829468_618 and a fresh temp name `exc0`. Then, inside the
 * exception handler (p0->inexceptblock incremented): each except branch
 * (son kind 87) either catches everything (single son) or emits an
 * `if (<exc matches T1 || T2 ...>)` test built from templates _621/_622.
 * When no catch-all branch exists, a trailing re-raise block (_623) is
 * added. Finally, the try node is popped again and a trailing finally
 * section (son kind 107) is emitted as a plain block.
 */
N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ropeobj178006* exc0;
TY533289 LOC16;
NI LOC17;
NI length0;
TY178507 LOC18;
Ropeobj178006* LOC19;
NI i0;
NIM_BOOL catchallpresent0;
TY533289 LOC78;
Tnode292802* LOC79;
{
/* Need a destination temp when the try has a non-empty type and no
   destination loc yet. */
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, t0);
exc0 = gettempname_533598_839829468((*p0).module);
{
/* Pull in the exception-support compilerproc (_615 preferred,
   _616 fallback). */
Tsym292834* LOC10;
Ropeobj178006* LOC13;
LOC10 = (Tsym292834*)0;
LOC10 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC10 == NIM_NIL))) goto LA11;
LOC13 = (Ropeobj178006*)0;
LOC13 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA8;
LA11: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA8: ;
/* Push t0 so nested return/break emit the right leave actions. */
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
/* try { <body> } — closed by endblock with the catch header. */
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (NI)0;
LOC17 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
length0 = sonslen_295351_850551059(t0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = exc0;
LOC19 = (Ropeobj178006*)0;
LOC19 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1);
endblock_544035_839829468(p0, LOC19);
{
/* Extra code in the handler when option bit 15 is set (template
   T839829468_619) — NOTE(review): presumably stack-trace related. */
TY533289 LOC24;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0);
}
LA22: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
catchallpresent0 = NIM_FALSE;
{
/* Walk except branches (son kind 87), starting after the body. */
while (1) {
NIM_BOOL LOC27;
NI blen0;
LOC27 = (NIM_BOOL)0;
LOC27 = (i0 < length0);
if (!(LOC27)) goto LA28;
LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA28: ;
if (!LOC27) goto LA26;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC31)) goto LA32;
LOC31 = isemptytype_297441_850551059((*t0).typ);
LA32: ;
if (!LOC31) goto LA33;
(*d0).k = ((Tlockind292808) 0);
}
LA33: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
/* After the first handler branch, emit an `else` separator
   (template T839829468_620). */
Ropeobj178006** LOC39;
TY533289 LOC40;
if (!(((NI) 1) < i0)) goto LA37;
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC40, 0, sizeof(LOC40));
addf_179205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0);
}
LA37: ;
{
/* Single-son branch = bare `except:` catch-all. */
TY533289 LOC45;
NI LOC46;
TY533289 LOC47;
if (!(blen0 == ((NI) 1))) goto LA43;
catchallpresent0 = NIM_TRUE;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0);
endblock_544060_839829468(p0);
}
goto LA41;
LA43: ;
{
/* Typed `except T1, T2:` — build an ||-joined match expression
   (template _621 per type, joined by T839829468_229). */
Ropeobj178006* orexpr0;
TY178507 LOC57;
TY533289 LOC58;
NI LOC59;
TY533289 LOC60;
orexpr0 = NIM_NIL;
{
NI j_547979_839829468;
NI HEX3Atmp_548101_839829468;
NI res_548104_839829468;
j_547979_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)(blen0 - ((NI) 2));
res_548104_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC56;
if (!(res_548104_839829468 <= HEX3Atmp_548101_839829468)) goto LA51;
j_547979_839829468 = res_548104_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA54;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA54: ;
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = exc0;
LOC56[1] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_547979_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2);
res_548104_839829468 += ((NI) 1);
} LA51: ;
}
}
/* `if (<orexpr>)` header, branch body, except-leave, close. */
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = orexpr0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1);
memset((void*)LOC58, 0, sizeof(LOC58));
LOC59 = (NI)0;
LOC59 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC60, 0, sizeof(LOC60));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0);
endblock_544060_839829468(p0);
}
LA41: ;
i0 += ((NI) 1);
} LA26: ;
}
{
/* No catch-all branch: re-raise unmatched exceptions. The block runs
   the finally section (if any) first, then template T839829468_623. */
TY533289 LOC70;
NI LOC71;
Tnode292802* finallyblock0;
TY533289 LOC76;
Ropeobj178006* LOC77;
if (!!(catchallpresent0)) goto LA63;
{
TY533289 LOC69;
if (!(((NI) 1) < i0)) goto LA67;
memset((void*)LOC69, 0, sizeof(LOC69));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0);
}
LA67: ;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC71 = (NI)0;
LOC71 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0);
finallyblock0 = lastson_295364_850551059(t0);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA74;
genstmts_539244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA74: ;
memset((void*)LOC76, 0, sizeof(LOC76));
LOC77 = (Ropeobj178006*)0;
LOC77 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC77);
endblock_544060_839829468(p0);
}
LA63: ;
/* Close the handler (template T839829468_160) and pop the try node. */
memset((void*)LOC78, 0, sizeof(LOC78));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0);
(*p0).inexceptblock -= ((NI) 1);
LOC79 = (Tnode292802*)0;
LOC79 = pop_318246_1689653243((&(*p0).nestedtrystmts));
{
/* Trailing finally section (son kind 107) runs unconditionally. */
NIM_BOOL LOC82;
LOC82 = (NIM_BOOL)0;
LOC82 = (i0 < length0);
if (!(LOC82)) goto LA83;
LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA83: ;
if (!LOC82) goto LA84;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
}
LA84: ;
}
/*
 * Appends string r0 — converted to a rope and indented to the proc's
 * current indentation level — to section s0 of proc p0.
 */
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0) {
	Ropeobj178006** dest = s_529179_3723162438(p0, s0);
	Ropeobj178006* asRope = rope_178277_2381377266(r0);
	Ropeobj178006* indented = indentline_532656_839829468(p0, asRope);
	add_178482_2381377266(dest, indented);
}
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0) {
    /* Remove and return the last element of the rope sequence *s0,
     * shrinking the sequence by one.  An empty/NIL sequence yields index
     * -1 and is not a supported input (matches original behavior). */
    NI lastidx0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
    Ropeobj178006* popped0 = (*s0)->data[lastidx0];
    (*s0) = (TY191350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj178006*), ((NI) (lastidx0)));
    return popped0;
}
/*
 * Generated by the Nim compiler's C backend: emits C code for a Nim
 * 'try' statement (t0) into proc p0, with optional destination d0 for
 * the try-expression's value.  The goto labels (LA*) are the compiler's
 * flattened rendering of structured control flow; do not hand-edit them.
 * NOTE(review): the "safepoint" temp plus the isdefined()-selected
 * format strings strongly suggest setjmp/longjmp-based exception
 * handling, but the format string contents are not visible here —
 * confirm against the Nim compiler sources before relying on this.
 */
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NIM_BOOL LOC8;
Ropeobj178006* safepoint0;
TY178507 LOC17;
TY178507 LOC18;
TY178507 LOC37;
NI LOC38;
NI length0;
TY533289 LOC39;
TY533289 LOC40;
NI LOC41;
TY533289 LOC42;
NI i0;
Tnode292802* LOC95;
TY178507 LOC103;
/* If the try has a non-empty result type and no destination was
 * supplied (d0->k == 0), allocate a temporary to hold the value. */
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Ensure the required header (string constant T839829468_624) is in the
 * module's header list; the boolean result is deliberately ignored. */
LOC8 = (NIM_BOOL)0;
LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624));
genlinedir_532823_839829468(p0, t0);
/* Fresh temp name used as the exception-handling safepoint variable. */
safepoint0 = gettempname_533598_839829468((*p0).module);
/* Reference one of two compiler procs (T..._615 preferred, T..._616 as
 * fallback) so that whichever exists gets emitted into this module. */
{
Tsym292834* LOC11;
Ropeobj178006* LOC14;
LOC11 = (Tsym292834*)0;
LOC11 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC11 == NIM_NIL))) goto LA12;
LOC14 = (Ropeobj178006*)0;
LOC14 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA9;
LA12: ;
{
Ropeobj178006* LOC16;
LOC16 = (Ropeobj178006*)0;
LOC16 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA9: ;
/* Declare the safepoint in the locals section (section 0) and register
 * it in the statements section (section 2). */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_626), LOC18, 1);
/* Pick one of three compile-time-define-dependent format strings
 * (T..._628 / _630 / _632), defaulting to T..._628 — presumably the
 * different setjmp flavors; TODO confirm. */
{
NIM_BOOL LOC21;
TY178507 LOC24;
LOC21 = (NIM_BOOL)0;
LOC21 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_627));
if (!LOC21) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1);
}
goto LA19;
LA22: ;
{
NIM_BOOL LOC26;
TY178507 LOC29;
LOC26 = (NIM_BOOL)0;
LOC26 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_629));
if (!LOC26) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1);
}
goto LA19;
LA27: ;
{
NIM_BOOL LOC31;
TY178507 LOC34;
LOC31 = (NIM_BOOL)0;
LOC31 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_631));
if (!LOC31) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1);
}
goto LA19;
LA32: ;
{
TY178507 LOC36;
memset((void*)LOC36, 0, sizeof(LOC36));
LOC36[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1);
}
LA19: ;
/* Open the guarded block (format T..._633 consumes the safepoint). */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = safepoint0;
LOC38 = (NI)0;
LOC38 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1);
length0 = sonslen_295351_850551059(t0);
/* Push this try statement onto p0.nestedtrystmts for the duration of
 * the body (popped again below, after the except branches). */
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
/* Generate the protected body (son 0). */
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC39, 0, sizeof(LOC39));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0);
endblock_544060_839829468(p0);
/* Open the handler block (T..._634) entered when an exception fired. */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC41 = (NI)0;
LOC41 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0);
memset((void*)LOC42, 0, sizeof(LOC42));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0);
/* Extra statement emitted only when option bit 15 of p0.options is set
 * (stack-trace-like option — TODO confirm which Toption this is). */
{
TY533289 LOC47;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA45;
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0);
}
LA45: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
/* Walk the except branches (sons of node kind 87), emitting one handler
 * block per branch. */
{
while (1) {
NIM_BOOL LOC50;
NI blen0;
LOC50 = (NIM_BOOL)0;
LOC50 = (i0 < length0);
if (!(LOC50)) goto LA51;
LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA51: ;
if (!LOC50) goto LA49;
{
NIM_BOOL LOC54;
LOC54 = (NIM_BOOL)0;
LOC54 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC54)) goto LA55;
LOC54 = isemptytype_297441_850551059((*t0).typ);
LA55: ;
if (!LOC54) goto LA56;
(*d0).k = ((Tlockind292808) 0);
}
LA56: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
/* A branch with a single son is a catch-all 'except:'; otherwise an
 * OR-expression of type tests for the listed exception types is built
 * and used as the block's condition. */
{
TY533289 LOC67;
NI LOC68;
TY178507 LOC69;
TY533289 LOC70;
if (!(blen0 == ((NI) 1))) goto LA60;
{
TY533289 LOC66;
if (!(((NI) 1) < i0)) goto LA64;
memset((void*)LOC66, 0, sizeof(LOC66));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0);
}
LA64: ;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC68 = (NI)0;
LOC68 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0);
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC70, 0, sizeof(LOC70));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0);
endblock_544060_839829468(p0);
}
goto LA58;
LA60: ;
{
Ropeobj178006* orexpr0;
TY178507 LOC91;
NI LOC92;
TY178507 LOC93;
TY533289 LOC94;
orexpr0 = NIM_NIL;
/* Join one type test per listed exception type (sons 0..blen-2); the
 * format used (T..._637 vs _638) depends on whether we compile in mode
 * gcmd == 2 or the module has symbol flag 27 set — presumably the C++
 * backend vs C backend distinction; TODO confirm. */
{
NI j_548247_839829468;
NI HEX3Atmp_548521_839829468;
NI res_548524_839829468;
j_548247_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)(blen0 - ((NI) 2));
res_548524_839829468 = ((NI) 0);
{
while (1) {
NimStringDesc* isobjformat0;
TY178507 LOC86;
if (!(res_548524_839829468 <= HEX3Atmp_548521_839829468)) goto LA74;
j_548247_839829468 = res_548524_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA77;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA77: ;
{
NIM_BOOL LOC81;
LOC81 = (NIM_BOOL)0;
LOC81 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC81) goto LA82;
LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA82: ;
if (!!(LOC81)) goto LA83;
isobjformat0 = copyString(((NimStringDesc*) &T839829468_637));
}
goto LA79;
LA83: ;
{
isobjformat0 = copyString(((NimStringDesc*) &T839829468_638));
}
LA79: ;
memset((void*)LOC86, 0, sizeof(LOC86));
LOC86[0] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548247_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1);
res_548524_839829468 += ((NI) 1);
} LA74: ;
}
}
{
if (!(((NI) 1) < i0)) goto LA89;
line_532695_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620));
}
LA89: ;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = orexpr0;
LOC92 = (NI)0;
LOC92 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1);
memset((void*)LOC93, 0, sizeof(LOC93));
LOC93[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1);
/* The branch body is the last son. */
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC94, 0, sizeof(LOC94));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0);
endblock_544060_839829468(p0);
}
LA58: ;
i0 += ((NI) 1);
} LA49: ;
}
(*p0).inexceptblock -= ((NI) 1);
/* Pop the try statement pushed above; the returned node is unused. */
LOC95 = (Tnode292802*)0;
LOC95 = pop_318246_1689653243((&(*p0).nestedtrystmts));
endblock_544060_839829468(p0);
/* If the last son is a 'finally' clause (node kind 107), emit it with
 * the safepoint temporarily pushed on p0.finallysafepoints so nested
 * raises can find it. */
{
NIM_BOOL LOC98;
Ropeobj178006* LOC102;
LOC98 = (NIM_BOOL)0;
LOC98 = (i0 < length0);
if (!(LOC98)) goto LA99;
LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA99: ;
if (!LOC98) goto LA100;
(*p0).finallysafepoints = (TY191350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj178006*));
asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0);
++(*p0).finallysafepoints->Sup.len;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
LOC102 = (Ropeobj178006*)0;
LOC102 = pop_178530_1689653243((&(*p0).finallysafepoints));
}
LA100: ;
/* Epilogue (T..._640) takes the safepoint — presumably re-raising when
 * the safepoint still records an unhandled exception; TODO confirm. */
memset((void*)LOC103, 0, sizeof(LOC103));
LOC103[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1);
}
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0) {
    /* Return a fresh copy of the format string (constant T839829468_641)
     * used by genraisestmt to emit a raise; p0 is accepted but unused. */
    return copyString(((NimStringDesc*) &T839829468_641));
}
/*
 * Generated by the Nim compiler's C backend: emits C code for a Nim
 * 'raise' statement.  Goto labels (LA*) are the compiler's flattened
 * control flow; do not hand-edit them.
 */
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0) {
/* When raising from inside an except block, first emit the enclosing
 * try's 'finally' body (if it has one) because the raise leaves the
 * handler — NOTE(review): inferred from structure, confirm intent. */
{
Tnode292802* finallyblock0;
if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3;
finallyblock0 = lastson_295364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA7;
gensimpleblock_544095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
}
LA3: ;
/* If son 0 is not empty (kind != 1), this is 'raise e': evaluate the
 * exception expression and emit the raise call with the exception value
 * and its type name as arguments. */
{
Tloc292816 a0;
Ropeobj178006* e0;
Ttype292840* typ0;
NimStringDesc* LOC13;
TY532811 LOC14;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
e0 = rdloc_538188_839829468((&a0));
typ0 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320));
genlinedir_532823_839829468(p0, t0);
LOC13 = (NimStringDesc*)0;
LOC13 = getraisefrmt_546824_839829468(p0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = e0;
LOC14[1] = makecstring_191638_155036129((*(*(*typ0).sym).name).s);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), LOC13, LOC14, 2);
}
goto LA9;
LA11: ;
/* Otherwise this is a bare 'raise' (re-raise).  Which statement is
 * emitted depends on gcmd == 2 / module flag 27 (presumably the C++
 * target) and a global option bit 31 — TODO confirm the exact option. */
{
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC18;
NIM_BOOL LOC19;
TY533289 LOC24;
Ropeobj178006* LOC25;
LOC18 = (NIM_BOOL)0;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
LOC18 = LOC19;
if (!(LOC18)) goto LA21;
LOC18 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA21: ;
if (!LOC18) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC25);
}
goto LA16;
LA22: ;
{
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0);
}
LA16: ;
}
LA9: ;
}
/* Intentionally a no-op: a Nim type section produces no C statements
 * here; type declarations are presumably emitted elsewhere in the
 * backend — NOTE(review): confirm against the type-generation pass. */
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0) {
}
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0) {
    /* Pick the C file section for an emit pragma's payload by inspecting
     * the prefix of its first string child (node kinds 20..22).  The
     * three recognized prefixes (constants T..._643/_644/_645) map to
     * sections 3, 9 and 1 respectively; anything else defaults to 7. */
    Tcfilesection529005 result0 = ((Tcfilesection529005) 7);
    NI childcount0 = len_293081_850551059(n0);
    if (((NI) 1) <= childcount0) {
        Tnode292802* first0 = (*n0).kindU.S6.sons->data[((NI) 0)];
        if ((*first0).kind >= ((Tnodekind292020) 20) && (*first0).kind <= ((Tnodekind292020) 22)) {
            NimStringDesc* sec0 = (*first0).kindU.S3.strval;
            if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643))) {
                result0 = ((Tcfilesection529005) 3);
            }
            else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644))) {
                result0 = ((Tcfilesection529005) 9);
            }
            else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645))) {
                result0 = ((Tcfilesection529005) 1);
            }
        }
    }
    return result0;
}
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0) {
    /* Emit the user-supplied snippet of an {.emit.} pragma.  At module
     * level (no enclosing proc) the snippet goes into the file section
     * chosen by determinesection; inside a proc it is appended to the
     * current statement section. */
    Ropeobj178006* snippet0 = genasmoremitstmt_548529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE);
    if ((*p0).prc == NIM_NIL) {
        Tnode292802* payload0 = HEX5BHEX5D_293238_850551059(t0, ((NI) 1));
        Tcfilesection529005 section0 = determinesection_548819_839829468(payload0);
        genclinedir_532813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info);
        add_178482_2381377266(&(*(*p0).module).s[(section0)- 0], snippet0);
    }
    else {
        genlinedir_532823_839829468(p0, t0);
        line_532690_839829468(p0, ((Tcprocsection529011) 2), snippet0);
    }
}
/*
 * Generated by the Nim compiler's C backend: registers a breakpoint for
 * a {.breakpoint.} pragma node t0.  Active only when option bit 17 of
 * p0.options is set (presumably the embedded-debugger option — TODO
 * confirm which Toption this is).  The registration code is appended to
 * the module-global rope gbreakpoints with (line, filename, name).
 */
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NimStringDesc* name0;
name0 = (NimStringDesc*)0;
{
TY535238 LOC12;
NI LOC13;
NimStringDesc* LOC14;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA3;
/* Node kind 34 means the pragma carries an explicit string argument:
 * use its normalized form as the breakpoint name. */
{
if (!((*t0).kind == ((Tnodekind292020) 34))) goto LA7;
name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval);
}
goto LA5;
LA7: ;
/* Otherwise synthesize a name from the global counter: prefix constant
 * T839829468_646 followed by the incremented breakpoint id. */
{
NimStringDesc* LOC10;
NimStringDesc* LOC11;
breakpointid_548860_839829468 += ((NI) 1);
LOC10 = (NimStringDesc*)0;
LOC11 = (NimStringDesc*)0;
LOC11 = nimIntToStr(breakpointid_548860_839829468);
LOC10 = rawNewString(LOC11->Sup.len + 2);
appendString(LOC10, ((NimStringDesc*) &T839829468_646));
appendString(LOC10, LOC11);
name0 = LOC10;
}
LA5: ;
genlinedir_532823_839829468(p0, t0);
/* Build the registration call's three arguments: source line, source
 * filename and the breakpoint name, all as C string/rope values. */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC13 = (NI)0;
LOC13 = tolinenumber_192415_155036129((*t0).info);
LOC12[0] = rope_178401_2381377266(((NI64) (LOC13)));
LOC14 = (NimStringDesc*)0;
LOC14 = tofilename_192257_155036129((*t0).info.fileindex);
LOC12[1] = makecstring_191638_155036129(LOC14);
LOC12[2] = makecstring_191638_155036129(name0);
appcg_532632_839829468((*p0).module, &gbreakpoints_548861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3);
}
LA3: ;
}
/*
 * Generated by the Nim compiler's C backend: registers a watchpoint for
 * a {.watchpoint.} pragma node n0.  Returns immediately unless option
 * bit 17 of p0.options is set (same debugger option genbreakpoint
 * checks).  Emits a registration call (format T839829468_648) taking
 * the watched location's address, its rendered source text, and its
 * runtime type info.
 */
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
Ttype292840* typ0;
TY535238 LOC5;
NimStringDesc* LOC6;
{ {
/* Early return when the debugger option is off. */
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
/* Evaluate the watched expression (son 1) into a location. */
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
typ0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = addrloc_538204_839829468((&a0));
LOC6 = (NimStringDesc*)0;
LOC6 = rendertree_311044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0);
LOC5[1] = makecstring_191638_155036129(LOC6);
LOC5[2] = gentypeinfo_535941_839829468((*p0).module, typ0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3);
}BeforeRet: ;
}
/*
 * Generated by the Nim compiler's C backend: handles a pragma statement
 * by dispatching on each child pragma's special word.  Recognized cases
 * (enum values are Tspecialword275003 ordinals): 191 -> emit,
 * 131 -> breakpoint, 176 -> watchpoint, 183 -> a pragma whose body is
 * code-generated into a scratch proc and stored as the module's
 * injectstmt (presumably {.injectStmt.} — TODO confirm).  All other
 * pragmas are ignored here.
 */
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0) {
{
NI i_549054_839829468;
NI HEX3Atmp_549073_839829468;
NI LOC2;
NI res_549076_839829468;
i_549054_839829468 = (NI)0;
HEX3Atmp_549073_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_549073_839829468 = (NI)(LOC2 - ((NI) 1));
res_549076_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
Tspecialword275003 LOC5;
if (!(res_549076_839829468 <= HEX3Atmp_549073_839829468)) goto LA4;
i_549054_839829468 = res_549076_839829468;
it0 = (*n0).kindU.S6.sons->data[i_549054_839829468];
LOC5 = (Tspecialword275003)0;
LOC5 = whichpragma_318911_2616423590(it0);
switch (LOC5) {
case ((Tspecialword275003) 191):
{
genemit_548839_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 131):
{
genbreakpoint_548862_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 176):
{
genwatchpoint_549016_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 183):
{
Tcproc529021* p0;
Ropeobj178006** LOC10;
/* Generate the pragma body into a throwaway proc context (with two
 * option bits cleared: 98304 = bits 15 and 16 — presumably the trace
 * options; TODO confirm) and copy its statement section into the
 * module's injectstmt. */
p0 = newproc_529206_3723162438(NIM_NIL, (*p_549041_839829468).module);
(*p0).options = ((*p0).options & ~ 98304);
genstmts_539244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]);
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10));
}
break;
default:
{
}
break;
}
res_549076_839829468 += ((NI) 1);
} LA4: ;
}
}
}
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0) {
    /* Generate code for a Nim parallel-for statement: emit a loop header
     * (format T839829468_649) taking the loop variable, the evaluated
     * lower and upper bounds, and a pragma text drawn from the range
     * call's 4th argument, then generate the loop body as a breakable
     * block.  The previous breakidx is restored afterwards. */
    NI savedbreakidx0;
    Tsym292834* loopvar0;
    Tloc292816 lowerloc0;
    Tloc292816 upperloc0;
    Tnode292802* rangecall0;
    TY535235 headerargs0;
    NimStringDesc* pragmatext0;
    TY533289 noargs0;
    (*p0).withinloop += ((NI) 1);
    genlinedir_532823_839829468(p0, t0);
    savedbreakidx0 = (*p0).breakidx;
    loopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
    memset((void*)(&lowerloc0), 0, sizeof(lowerloc0));
    memset((void*)(&upperloc0), 0, sizeof(upperloc0));
    assignlocalvar_538614_839829468(p0, loopvar0);
    rangecall0 = (*t0).kindU.S6.sons->data[((NI) 1)];
    initlocexpr_539283_839829468(p0, (*rangecall0).kindU.S6.sons->data[((NI) 1)], (&lowerloc0));
    initlocexpr_539283_839829468(p0, (*rangecall0).kindU.S6.sons->data[((NI) 2)], (&upperloc0));
    memset((void*)headerargs0, 0, sizeof(headerargs0));
    headerargs0[0] = rdloc_538188_839829468((&(*loopvar0).loc));
    headerargs0[1] = rdloc_538188_839829468((&lowerloc0));
    headerargs0[2] = rdloc_538188_839829468((&upperloc0));
    pragmatext0 = getstr_297230_850551059((*rangecall0).kindU.S6.sons->data[((NI) 3)]);
    headerargs0[3] = rope_178277_2381377266(pragmatext0);
    linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_649), headerargs0, 4);
    memset((void*)noargs0, 0, sizeof(noargs0));
    (*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), noargs0, 0);
    (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
    genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]);
    endblock_544060_839829468(p0);
    (*p0).breakidx = savedbreakidx0;
    (*p0).withinloop -= ((NI) 1);
}
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    /* Emit a state label (format T839829468_652) for a closure-iterator
     * state node.  The node must have exactly one son of kind 6 (an int
     * literal) holding the state index; otherwise an internal error with
     * message constant T839829468_650 is reported. */
    NI64 stateidx0;
    TY178507 fmtarg0;
    NI sonscount0 = len_293081_850551059(n0);
    NIM_BOOL wellformed0 = (sonscount0 == ((NI) 1)) && ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6));
    if (!wellformed0) {
        NimStringDesc* msg0 = HEX24_196185_1689653243(T839829468_650);
        internalerror_196113_155036129(msg0);
    }
    stateidx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval;
    memset((void*)fmtarg0, 0, sizeof(fmtarg0));
    fmtarg0[0] = rope_178401_2381377266(stateidx0);
    linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_652), fmtarg0, 1);
}
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    /* Emit a computed goto over iterator states: evaluate the state
     * expression (son 0), open a dispatch (format T839829468_603), emit
     * the default arm (T..._653, which requires the proc's BeforeRet
     * label), then one case arm (T..._654) per state value from 0 up to
     * the last ordinal of the state expression's type, and finally the
     * closing text (T..._160). */
    Tloc292816 stateloc0;
    TY178507 onearg0;
    TY533289 noargs0;
    TY533289 closeargs0;
    NI64 laststate0;
    NI64 state0;
    memset((void*)(&stateloc0), 0, sizeof(stateloc0));
    initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&stateloc0));
    memset((void*)onearg0, 0, sizeof(onearg0));
    onearg0[0] = rdloc_538188_839829468((&stateloc0));
    linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), onearg0, 1);
    (*p0).beforeretneeded = NIM_TRUE;
    memset((void*)noargs0, 0, sizeof(noargs0));
    linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_653), noargs0, 0);
    laststate0 = lastord_320004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ);
    for (state0 = IL64(0); state0 <= laststate0; state0 += ((NI) 1)) {
        TY178507 casearg0;
        memset((void*)casearg0, 0, sizeof(casearg0));
        casearg0[0] = rope_178401_2381377266(state0);
        linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_654), casearg0, 1);
    }
    memset((void*)closeargs0, 0, sizeof(closeargs0));
    linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), closeargs0, 0);
}
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    /* Emit the "break state" check for a closure iterator.  If son 0 is
     * a closure node (kind 155), evaluate its environment part (son 1)
     * and use format T839829468_655; otherwise evaluate son 0 directly
     * and use format T839829468_656. */
    Tloc292816 stateloc0;
    TY178507 onearg0;
    Tnode292802* target0 = (*n0).kindU.S6.sons->data[((NI) 0)];
    memset((void*)(&stateloc0), 0, sizeof(stateloc0));
    if ((*target0).kind == ((Tnodekind292020) 155)) {
        initlocexpr_539283_839829468(p0, (*target0).kindU.S6.sons->data[((NI) 1)], (&stateloc0));
        memset((void*)onearg0, 0, sizeof(onearg0));
        onearg0[0] = rdloc_538188_839829468((&stateloc0));
        linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_655), onearg0, 1);
    }
    else {
        initlocexpr_539283_839829468(p0, target0, (&stateloc0));
        memset((void*)onearg0, 0, sizeof(onearg0));
        onearg0[0] = rdloc_538188_839829468((&stateloc0));
        linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_656), onearg0, 1);
    }
}
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*n0).kindU.S4.sym;
switch ((*sym0).kind) {
case ((Tsymkind292435) 13):
{
{
if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5;
fillprocloc_539201_839829468(sym0);
genprocprototype_539254_839829468((*p0).module, sym0);
}
goto LA3;
LA5: ;
{
genproc_532951_839829468((*p0).module, sym0);
}
LA3: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
{
NimStringDesc* LOC13;
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA11;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48);
appendString(LOC13, ((NimStringDesc*) &T839829468_270));
appendString(LOC13, (*(*sym0).name).s);
localerror_196085_155036129((*n0).info, LOC13);
}
LA11: ;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC16;
NimStringDesc* LOC20;
LOC16 = (NIM_BOOL)0;
LOC16 = ((*sym0).loc.r == NIM_NIL);
if (LOC16) goto LA17;
LOC16 = ((*sym0).loc.t == NIM_NIL);
LA17: ;
if (!LOC16) goto LA18;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC20, ((NimStringDesc*) &T839829468_271));
appendString(LOC20, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC20);
}
LA18: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 10):
{
{
NIM_BOOL LOC24;
Ropeobj178006* LOC27;
LOC24 = (NIM_BOOL)0;
LOC24 = issimpleconst_532311_839829468((*sym0).typ);
if (!LOC24) goto LA25;
LOC27 = (Ropeobj178006*)0;
LOC27 = genliteral_549476_839829468(p0, (*sym0).ast, (*sym0).typ);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc292812) 1));
}
goto LA22;
LA25: ;
{
gencomplexconst_558249_839829468(p0, sym0, d0);
}
LA22: ;
}
break;
case ((Tsymkind292435) 19):
{
Ropeobj178006* LOC30;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) ((*sym0).position)));
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc292812) 0));
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 20):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
{
if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34;
genvarprototype_539236_839829468((*p0).module, sym0);
}
LA34: ;
{
NIM_BOOL LOC38;
NimStringDesc* LOC42;
NimStringDesc* LOC43;
LOC38 = (NIM_BOOL)0;
LOC38 = ((*sym0).loc.r == NIM_NIL);
if (LOC38) goto LA39;
LOC38 = ((*sym0).loc.t == NIM_NIL);
LA39: ;
if (!LOC38) goto LA40;
LOC42 = (NimStringDesc*)0;
LOC43 = (NimStringDesc*)0;
LOC43 = nimIntToStr((*sym0).Sup.id);
LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20);
appendString(LOC42, ((NimStringDesc*) &T839829468_285));
appendString(LOC42, (*(*sym0).name).s);
appendString(LOC42, ((NimStringDesc*) &T839829468_12));
appendString(LOC42, LOC43);
internalerror_196100_155036129((*n0).info, LOC42);
}
LA40: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA46;
accessthreadlocalvar_532945_839829468(p0, sym0);
{
NIM_BOOL LOC50;
Ropeobj178006* LOC53;
LOC50 = (NIM_BOOL)0;
LOC50 = emulatedthreadvars_532949_839829468();
if (!LOC50) goto LA51;
LOC53 = (Ropeobj178006*)0;
LOC53 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r);
putintodest_550468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc292812) 0));
}
goto LA48;
LA51: ;
{
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
LA48: ;
}
goto LA44;
LA46: ;
{
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
LA44: ;
}
break;
case ((Tsymkind292435) 5):
{
{
NIM_BOOL LOC59;
NimStringDesc* LOC63;
NimStringDesc* LOC64;
LOC59 = (NIM_BOOL)0;
LOC59 = ((*sym0).loc.r == NIM_NIL);
if (LOC59) goto LA60;
LOC59 = ((*sym0).loc.t == NIM_NIL);
LA60: ;
if (!LOC59) goto LA61;
LOC63 = (NimStringDesc*)0;
LOC64 = (NimStringDesc*)0;
LOC64 = nimIntToStr((*sym0).Sup.id);
LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21);
appendString(LOC63, ((NimStringDesc*) &T839829468_289));
appendString(LOC63, (*(*sym0).name).s);
appendString(LOC63, ((NimStringDesc*) &T839829468_12));
appendString(LOC63, LOC64);
internalerror_196100_155036129((*n0).info, LOC63);
}
LA61: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 3):
{
{
NIM_BOOL LOC68;
NimStringDesc* LOC72;
NimStringDesc* LOC73;
LOC68 = (NIM_BOOL)0;
LOC68 = ((*sym0).loc.r == NIM_NIL);
if (LOC68) goto LA69;
LOC68 = ((*sym0).loc.t == NIM_NIL);
LA69: ;
if (!LOC68) goto LA70;
LOC72 = (NimStringDesc*)0;
LOC73 = (NimStringDesc*)0;
LOC73 = nimIntToStr((*sym0).Sup.id);
LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22);
appendString(LOC72, ((NimStringDesc*) &T839829468_290));
appendString(LOC72, (*(*sym0).name).s);
appendString(LOC72, ((NimStringDesc*) &T839829468_12));
appendString(LOC72, LOC73);
internalerror_196100_155036129((*n0).info, LOC72);
}
LA70: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
default:
{
NimStringDesc* LOC75;
LOC75 = (NimStringDesc*)0;
LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 22);
appendString(LOC75, ((NimStringDesc*) &T839829468_291));
appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI292435)));
appendString(LOC75, ((NimStringDesc*) &T839829468_292));
internalerror_196100_155036129((*n0).info, LOC75);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
{
NIM_BOOL LOC79;
Ropeobj178006* LOC82;
LOC79 = (NIM_BOOL)0;
LOC79 = isemptytype_297441_850551059((*n0).typ);
if (!!(LOC79)) goto LA80;
LOC82 = (Ropeobj178006*)0;
LOC82 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc292812) 0));
}
LA80: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
Ropeobj178006* LOC84;
LOC84 = (Ropeobj178006*)0;
LOC84 = genliteral_539273_839829468(p0, n0);
putdataintodest_550436_839829468(p0, d0, (*n0).typ, LOC84);
}
break;
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 19):
case ((Tnodekind292020) 5):
{
Ropeobj178006* LOC86;
LOC86 = (Ropeobj178006*)0;
LOC86 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc292812) 0));
}
break;
case ((Tnodekind292020) 27):
case ((Tnodekind292020) 32):
case ((Tnodekind292020) 29):
case ((Tnodekind292020) 30):
case ((Tnodekind292020) 31):
case ((Tnodekind292020) 26):
case ((Tnodekind292020) 28):
{
Tnode292802* op0;
genlinedir_532823_839829468(p0, n0);
op0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
Tloc292816 a0;
if (!(*n0).typ == 0) goto LA90;
memset((void*)(&a0), 0, sizeof(a0));
{
NIM_BOOL LOC94;
LOC94 = (NIM_BOOL)0;
LOC94 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC94)) goto LA95;
LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA95: ;
if (!LOC94) goto LA96;
genmagicexpr_557033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic);
}
goto LA92;
LA96: ;
{
gencall_543632_839829468(p0, n0, (&a0));
}
LA92: ;
}
goto LA88;
LA90: ;
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC102)) goto LA103;
LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA103: ;
if (!LOC102) goto LA104;
genmagicexpr_557033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic);
}
goto LA100;
LA104: ;
{
gencall_543632_839829468(p0, n0, d0);
}
LA100: ;
}
LA88: ;
}
break;
case ((Tnodekind292020) 39):
{
{
NIM_BOOL LOC110;
NI LOC112;
Ropeobj178006* LOC115;
LOC110 = (NIM_BOOL)0;
LOC110 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC110)) goto LA111;
LOC112 = (NI)0;
LOC112 = len_293081_850551059(n0);
LOC110 = !((LOC112 == ((NI) 0)));
LA111: ;
if (!LOC110) goto LA113;
LOC115 = (Ropeobj178006*)0;
LOC115 = gensetnode_549664_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc292812) 0));
}
goto LA108;
LA113: ;
{
gensetconstr_557496_839829468(p0, n0, d0);
}
LA108: ;
}
break;
case ((Tnodekind292020) 41):
{
{
NIM_BOOL LOC120;
NI LOC122;
LOC120 = (NIM_BOOL)0;
LOC120 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC120)) goto LA121;
LOC122 = (NI)0;
LOC122 = len_293081_850551059(n0);
LOC120 = !((LOC122 == ((NI) 0)));
LA121: ;
if (!LOC120) goto LA123;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA118;
LA123: ;
{
Ttype292840* LOC126;
LOC126 = (Ttype292840*)0;
LOC126 = skiptypes_296099_850551059((*n0).typ, IL64(211106242013440));
if (!((*LOC126).kind == ((Ttypekind292244) 24))) goto LA127;
genseqconstr_555004_839829468(p0, n0, d0);
}
goto LA118;
LA127: ;
{
genarrayconstr_558207_839829468(p0, n0, d0);
}
LA118: ;
}
break;
case ((Tnodekind292020) 37):
{
{
NIM_BOOL LOC133;
NI LOC135;
LOC133 = (NIM_BOOL)0;
LOC133 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC133)) goto LA134;
LOC135 = (NI)0;
LOC135 = len_293081_850551059(n0);
LOC133 = !((LOC135 == ((NI) 0)));
LA134: ;
if (!LOC133) goto LA136;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA131;
LA136: ;
{
gentupleconstr_557618_839829468(p0, n0, d0);
}
LA131: ;
}
break;
case ((Tnodekind292020) 38):
{
genobjconstr_554903_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 61):
{
gencast_556538_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
case ((Tnodekind292020) 60):
{
genconv_556633_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 64):
case ((Tnodekind292020) 63):
{
genaddr_553051_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 42):
{
genbracketexpr_554277_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
genderef_543921_839829468(p0, n0, d0, NIM_FALSE);
}
break;
case ((Tnodekind292020) 45):
{
genrecordfield_553448_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 46):
{
gencheckedrecordfield_554046_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 127):
case ((Tnodekind292020) 112):
{
genblock_546083_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 126):
{
genstmtlistexpr_558402_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 115):
{
{
NI i_559023_839829468;
NI HEX3Atmp_559276_839829468;
NI LOC151;
NI res_559279_839829468;
i_559023_839829468 = (NI)0;
HEX3Atmp_559276_839829468 = (NI)0;
LOC151 = (NI)0;
LOC151 = sonslen_295351_850551059(n0);
HEX3Atmp_559276_839829468 = (NI)(LOC151 - ((NI) 1));
res_559279_839829468 = ((NI) 0);
{
while (1) {
if (!(res_559279_839829468 <= HEX3Atmp_559276_839829468)) goto LA153;
i_559023_839829468 = res_559279_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_559023_839829468]);
res_559279_839829468 += ((NI) 1);
} LA153: ;
}
}
}
break;
case ((Tnodekind292020) 48):
case ((Tnodekind292020) 92):
{
genif_544982_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 93):
{
expr_539248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0);
}
break;
case ((Tnodekind292020) 66):
{
downconv_558581_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 67):
{
upconv_558431_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 68):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563));
}
break;
case ((Tnodekind292020) 69):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564));
}
break;
case ((Tnodekind292020) 70):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565));
}
break;
case ((Tnodekind292020) 71):
{
convstrtocstr_556643_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 72):
{
convcstrtostr_556655_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 51):
case ((Tnodekind292020) 52):
{
Tsym292834* sym0;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC166;
NimStringDesc* LOC170;
LOC166 = (NIM_BOOL)0;
LOC166 = ((*sym0).loc.r == NIM_NIL);
if (LOC166) goto LA167;
LOC166 = ((*sym0).loc.t == NIM_NIL);
LA167: ;
if (!LOC166) goto LA168;
LOC170 = (NimStringDesc*)0;
LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC170, ((NimStringDesc*) &T839829468_271));
appendString(LOC170, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC170);
}
LA168: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tnodekind292020) 155):
{
genclosure_557836_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 1):
{
}
break;
case ((Tnodekind292020) 96):
{
genwhilestmt_545985_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 99):
case ((Tnodekind292020) 100):
{
genvarstmt_544854_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 101):
{
genconststmt_544909_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 94):
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594));
}
break;
case ((Tnodekind292020) 97):
{
gencase_547827_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 109):
{
genreturnstmt_545617_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 110):
{
genbreakstmt_546444_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 73):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA183;
genasgn_549239_839829468(p0, n0, NIM_FALSE);
}
LA183: ;
}
break;
case ((Tnodekind292020) 74):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA188;
genasgn_549239_839829468(p0, n0, !(((*p0).prc == NIM_NIL)));
}
LA188: ;
}
break;
case ((Tnodekind292020) 114):
{
{
Tloc292816 a0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA193;
genlinedir_532823_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA193: ;
}
break;
case ((Tnodekind292020) 89):
{
genasmstmt_548659_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 106):
{
{
NIM_BOOL LOC199;
NIM_BOOL LOC200;
LOC199 = (NIM_BOOL)0;
LOC200 = (NIM_BOOL)0;
LOC200 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC200) goto LA201;
LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA201: ;
LOC199 = LOC200;
if (!(LOC199)) goto LA202;
LOC199 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA202: ;
if (!LOC199) goto LA203;
gentrycpp_547866_839829468(p0, n0, d0);
}
goto LA197;
LA203: ;
{
gentry_548114_839829468(p0, n0, d0);
}
LA197: ;
}
break;
case ((Tnodekind292020) 108):
{
genraisestmt_546828_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 98):
{
gentypesection_538184_839829468((*p0).module, n0);
}
break;
case ((Tnodekind292020) 125):
case ((Tnodekind292020) 84):
case ((Tnodekind292020) 121):
case ((Tnodekind292020) 116):
case ((Tnodekind292020) 117):
case ((Tnodekind292020) 118):
case ((Tnodekind292020) 119):
case ((Tnodekind292020) 120):
case ((Tnodekind292020) 83):
case ((Tnodekind292020) 82):
{
}
break;
case ((Tnodekind292020) 90):
{
genpragma_549039_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 91):
{
Tnode292802* LOC211;
LOC211 = (Tnode292802*)0;
LOC211 = lastson_295364_850551059(n0);
expr_539248_839829468(p0, LOC211, d0);
}
break;
case ((Tnodekind292020) 79):
case ((Tnodekind292020) 80):
case ((Tnodekind292020) 81):
{
{
Tsym292834* prc0;
if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1))) goto LA215;
prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC219;
Tsym292834* LOC220;
LOC219 = (NIM_BOOL)0;
LOC220 = (Tsym292834*)0;
LOC220 = skipgenericowner_297280_850551059(prc0);
LOC219 = ((*LOC220).kind == ((Tsymkind292435) 6));
if (!(LOC219)) goto LA221;
LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0));
LA221: ;
if (!LOC219) goto LA222;
{
NIM_BOOL LOC226;
NIM_BOOL LOC227;
NIM_BOOL LOC228;
NIM_BOOL LOC229;
Tsym292834* LOC231;
NIM_BOOL LOC234;
LOC226 = (NIM_BOOL)0;
LOC227 = (NIM_BOOL)0;
LOC228 = (NIM_BOOL)0;
LOC229 = (NIM_BOOL)0;
LOC229 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0));
if (!(LOC229)) goto LA230;
LOC231 = (Tsym292834*)0;
LOC231 = getmodule_299123_2984716966(prc0);
LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0));
LA230: ;
LOC228 = LOC229;
if (LOC228) goto LA232;
LOC228 = ((65600 & (*prc0).flags) == 64);
LA232: ;
LOC227 = LOC228;
if (LOC227) goto LA233;
LOC234 = (NIM_BOOL)0;
LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC234)) goto LA235;
LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0);
LA235: ;
LOC227 = LOC234;
LA233: ;
LOC226 = LOC227;
if (LOC226) goto LA236;
LOC226 = ((*prc0).kind == ((Tsymkind292435) 13));
LA236: ;
if (!LOC226) goto LA237;
{
NIM_BOOL LOC241;
Tnode292802* LOC242;
LOC241 = (NIM_BOOL)0;
LOC242 = (Tnode292802*)0;
LOC242 = getbody_335226_1724185294(prc0);
LOC241 = !(((*LOC242).kind == ((Tnodekind292020) 1)));
if (LOC241) goto LA243;
LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0);
LA243: ;
if (!LOC241) goto LA244;
genproc_532951_839829468((*p0).module, prc0);
}
LA244: ;
}
LA237: ;
}
LA222: ;
}
LA215: ;
}
break;
case ((Tnodekind292020) 95):
{
genparforstmt_546208_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 157):
{
genstate_544117_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 156):
{
gengotostate_544144_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 158):
{
genbreakstate_544229_839829468(p0, n0);
}
break;
default:
{
NimStringDesc* LOC251;
LOC251 = (NimStringDesc*)0;
LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 25);
appendString(LOC251, ((NimStringDesc*) &T839829468_291));
appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendString(LOC251, ((NimStringDesc*) &T839829468_657));
internalerror_196100_155036129((*n0).info, LOC251);
}
break;
}
}
/* Machine-generated (Nim compiler cgen). Emits node t0 as a statement
 * into proc context p0 by evaluating it into a scratch location a0,
 * then verifies the statement left no value behind: a0.k must be one
 * of the kinds covered by bitmask 7 (k == 0, 1 or 2); any other kind
 * triggers an internal compiler error. */
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
expr_539248_839829468(p0, t0, (&a0));
{
NimStringDesc* LOC5;
/* (1 << a0.k) & 7 — true only for the three "no result" loc kinds. */
if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_658);
internalerror_196113_155036129(LOC5);
}
LA3: ;
}
/* Machine-generated (Nim compiler cgen). The codegen pass's per-node
 * "process" callback: generates code for top-level node n0 into the
 * module's init proc and returns n0 unchanged (this pass never
 * rewrites the AST). Does nothing when the pass context b0 is nil or
 * skipcodegen reports that codegen is disabled for n0. */
N_NIMCALL(Tnode292802*, myprocess_563402_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
/* Refresh the init proc's option set, then emit n0 into it. */
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}BeforeRet: ;
return result0;
}
/* Builds the rope "<mangledOwnerName><sep><moduleName><suffix0>" for
 * module symbol m0. The owner-name prefix (plus separator string
 * T839829468_12) is only emitted when m0's flags have none of the bits
 * in mask 12288 (bits 12 and 13) set. Restructured from the generated
 * goto-guard into a plain if; behavior is unchanged. */
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0) {
Ropeobj178006* name0 = (Ropeobj178006*)0;
if (((12288 & (*m0).flags) == 0)) {
NimStringDesc* mangled0 = mangle_528847_2036603609((*(*(*m0).owner).name).s);
name0 = rope_178277_2381377266(mangled0);
add_178487_2381377266(&name0, ((NimStringDesc*) &T839829468_12));
}
add_178487_2381377266(&name0, (*(*m0).name).s);
add_178487_2381377266(&name0, suffix0);
return name0;
}
/* Init-proc name of module m0: getsomeinitname with the "Init"
 * suffix constant T839829468_659. */
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0) {
return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_659));
}
/* DatInit-proc name of module m0: getsomeinitname with the suffix
 * constant T839829468_660. */
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0) {
return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_660));
}
/* Machine-generated (Nim compiler cgen). Registers module m0 with the
 * generated main(): appends forward declarations (format T839829468_661)
 * for the module's Init and DatInit procs to mainmodprocs. Unless flag
 * bit 13 is set (NOTE(review): presumably "no automatic init" — verify
 * against Tsymflag), it also appends the DatInit call to maindatinit
 * and the Init call to mainmodinit when flag bit 12 is set (presumably
 * the main module) or to othermodsinit otherwise. */
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0) {
Ropeobj178006* init0;
Ropeobj178006* datinit0;
TY178507 LOC1;
TY178507 LOC2;
init0 = getinitname_562235_839829468(m0);
datinit0 = getdatinitname_562239_839829468(m0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = init0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = datinit0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1);
{
TY178507 LOC7;
Ropeobj178006* initcall0;
TY178507 LOC8;
/* Skip call registration entirely when flag bit 13 is set. */
if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0))) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = datinit0;
addf_179205_2381377266(&maindatinit_529151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = init0;
initcall0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1);
{
/* Flag bit 12 routes the call into the main-module init list. */
if (!(((*m0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA11;
add_178482_2381377266(&mainmodinit_529149_3723162438, initcall0);
}
goto LA9;
LA11: ;
{
add_178482_2381377266(&othermodsinit_529150_3723162438, initcall0);
}
LA9: ;
}
LA5: ;
}
/* Emits one formatted line (template T839829468_674) per entry of the
 * global fileinfos table, passing each entry's projpath as a C string,
 * and returns the accumulated rope. The leading cgsym call forces the
 * runtime symbol named by T839829468_673 into the module; its return
 * value is intentionally discarded. The original loop computed its
 * upper bound once, so caching the length here is equivalent. */
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0) {
Ropeobj178006* acc0;
NI total0;
NI idx0;
{
Ropeobj178006* forced0 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_673));
(void)forced0;
}
acc0 = NIM_NIL;
total0 = (fileinfos_191629_155036129 ? fileinfos_191629_155036129->Sup.len : 0);
for (idx0 = ((NI) 0); idx0 < total0; idx0 += ((NI) 1)) {
TY178507 args0;
memset((void*)args0, 0, sizeof(args0));
args0[0] = makecstring_191638_155036129(fileinfos_191629_155036129->data[idx0].projpath);
addf_179205_2381377266(&acc0, ((NimStringDesc*) &T839829468_674), args0, 1);
}
return acc0;
}
/* Machine-generated (Nim compiler cgen). Emits the program entry point
 * into file section 10. Steps: (1) pick the NimMain/platform-main
 * template pair from the target OS and global option bits (os 2 with
 * GUI/DLL-related options, standalone option bit 8, os 24, or the
 * default pair); (2) optionally emit breakpoint setup and, under
 * option bit 17, the debug filename registrations from genfilenames;
 * (3) build the stack-bottom init call (a no-op rope T839829468_490 on
 * os 24 or gc mode 0, else template T839829468_675); (4) emit the
 * NimMainModule prologue (template T839829468_676, 5 args, including
 * emulated thread-var setup via T839829468_677 when applicable), the
 * chosen nimmain0 template, and — unless global option bit 20 — the
 * platform main othermain0. */
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0) {
NimStringDesc* nimmain0;
NimStringDesc* othermain0;
Ropeobj178006* initstackbottomcall0;
TY536475 LOC38;
TY535238 LOC47;
nimmain0 = (NimStringDesc*)0;
othermain0 = (NimStringDesc*)0;
/* --- Select main templates by target OS / options. --- */
{
NIM_BOOL LOC3;
NIM_BOOL LOC12;
LOC3 = (NIM_BOOL)0;
LOC3 = (targetos_176629_4151366050 == ((Tsystemos176004) 2));
if (!(LOC3)) goto LA4;
LOC3 = !(((gglobaloptions_169130_2607990831 & 1280) == 0));
LA4: ;
if (!LOC3) goto LA5;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 10))&63U)))!=0)) goto LA9;
nimmain0 = copyString(((NimStringDesc*) &T839829468_663));
othermain0 = copyString(((NimStringDesc*) &T839829468_664));
}
goto LA7;
LA9: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_666));
}
LA7: ;
LOC12 = (NIM_BOOL)0;
/* Force the extra header named by T839829468_667; result unused. */
LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667));
}
goto LA1;
LA5: ;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA14;
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_668));
}
goto LA1;
LA14: ;
{
if (!(targetos_176629_4151366050 == ((Tsystemos176004) 24))) goto LA17;
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_670));
}
goto LA1;
LA17: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_671));
}
LA1: ;
/* --- Breakpoint / debug filename support. --- */
{
Ropeobj178006* LOC24;
if (!!((gbreakpoints_548861_839829468 == NIM_NIL))) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_672));
}
LA22: ;
{
Ropeobj178006* LOC29;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA27;
LOC29 = (Ropeobj178006*)0;
LOC29 = genfilenames_561688_839829468(m0);
add_178482_2381377266(&gbreakpoints_548861_839829468, LOC29);
}
LA27: ;
/* --- Stack-bottom initialisation call. --- */
{
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = (targetos_176629_4151366050 == ((Tsystemos176004) 24));
if (LOC32) goto LA33;
LOC32 = (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0));
LA33: ;
if (!LOC32) goto LA34;
initstackbottomcall0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
goto LA30;
LA34: ;
{
TY533289 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
initstackbottomcall0 = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0);
}
LA30: ;
/* --- Assemble and emit the main templates into section 10. --- */
(*m0).labels += ((NI) 1);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = maindatinit_529151_3723162438;
LOC38[1] = gbreakpoints_548861_839829468;
LOC38[2] = othermodsinit_529150_3723162438;
{
NIM_BOOL LOC41;
TY533289 LOC45;
LOC41 = (NIM_BOOL)0;
LOC41 = emulatedthreadvars_532949_839829468();
if (!(LOC41)) goto LA42;
LOC41 = !((targetos_176629_4151366050 == ((Tsystemos176004) 24)));
LA42: ;
if (!LOC41) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC38[3] = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0);
}
goto LA39;
LA43: ;
{
LOC38[3] = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
LA39: ;
LOC38[4] = initstackbottomcall0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5);
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = mainmodinit_529149_3723162438;
LOC47[1] = initstackbottomcall0;
LOC47[2] = rope_178401_2381377266(((NI64) ((*m0).labels)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], nimmain0, LOC47, 3);
{
TY533289 LOC52;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 20))&63U)))!=0))) goto LA50;
memset((void*)LOC52, 0, sizeof(LOC52));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], othermain0, LOC52, 0);
}
LA50: ;
}
/* Machine-generated (Nim compiler cgen). The codegen pass's "close"
 * callback: emits the final node n0 (if non-nil) into the module's
 * init proc, registers the module with the generated main, and — when
 * the module symbol carries flag bit 12 (presumably the main module) —
 * sets codegen flag bit 5, generates all method dispatchers, and emits
 * the program's main proc. Returns n0 unchanged. */
N_NIMCALL(Tnode292802*, myclose_563830_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
{
if (!!((n0 == NIM_NIL))) goto LA9;
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}
LA9: ;
registermoduletomain_562243_839829468((*m0).module);
{
Tnode292802* disp0;
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA13;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 5))%(sizeof(NU8)*8));
disp0 = generatemethoddispatchers_432151_3853300031();
/* Generate one aux proc per son of the dispatcher list node. */
{
NI i_563891_839829468;
NI HEX3Atmp_563895_839829468;
NI LOC16;
NI res_563898_839829468;
i_563891_839829468 = (NI)0;
HEX3Atmp_563895_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(disp0);
HEX3Atmp_563895_839829468 = (NI)(LOC16 - ((NI) 1));
res_563898_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563898_839829468 <= HEX3Atmp_563895_839829468)) goto LA18;
i_563891_839829468 = res_563898_839829468;
genprocaux_560284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_563891_839829468]).kindU.S4.sym);
res_563898_839829468 += ((NI) 1);
} LA18: ;
}
}
genmainproc_561729_839829468(m0);
}
LA13: ;
}BeforeRet: ;
return result0;
}
/* Generates every proc that was forward-declared in module m0.
 * IMPORTANT: the sequence length is re-read on every iteration, just
 * as in the original generated loop, because generating one proc may
 * forward additional procs (which must then be processed too). A proc
 * still carrying flag bit 4 raises an internal error (message prefix
 * T839829468_678 + proc name). Afterwards the global forwarded-procs
 * counter is reduced by the number handled and the per-module list is
 * truncated to length zero. */
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0) {
NI idx0 = ((NI) 0);
while ((*m0).forwardedprocs && idx0 < (*m0).forwardedprocs->Sup.len) {
Tsym292834* fwd0 = (*m0).forwardedprocs->data[idx0];
if ((((*fwd0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) {
NimStringDesc* msg0 = rawNewString((*(*fwd0).name).s->Sup.len + 17);
appendString(msg0, ((NimStringDesc*) &T839829468_678));
appendString(msg0, (*(*fwd0).name).s);
internalerror_196100_155036129((*fwd0).info, msg0);
}
genprocnoforward_560906_839829468(m0, fwd0);
idx0 += ((NI) 1);
}
gforwardedprocscounter_529171_3723162438 -= idx0;
(*m0).forwardedprocs = (Tsymseq292804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*), ((NI) 0));
}
/* Machine-generated (Nim compiler cgen). Assembles the module's Init
 * proc and appends it (plus the DatInit proc) to file section 11.
 * Layout of the Init body: optional type-node / nim-type declarations
 * (emitted into section 12), GC-frame setup, then for each of the
 * three Tcprocsection kinds 0..2 the corresponding section of
 * preinitproc, initproc and postinitproc in order, with optional
 * stack-frame push (option bit 15) after section 0 and frame/GC-frame
 * teardown at the end. The DatInit proc is built from file sections
 * 12..16. Finally, any extension-loader ropes stored under the digit
 * keys '0'..'9' of m0.extensionloaders are wrapped in template
 * T839829468_684 and appended to section 11 as well. */
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0) {
Ropeobj178006* initname0;
Ropeobj178006* prc0;
TY178507 LOC1;
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
Ropeobj178006** LOC14;
Ropeobj178006** LOC15;
Ropeobj178006** LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC33;
Ropeobj178006** LOC34;
Ropeobj178006** LOC35;
Ropeobj178006** LOC36;
Ropeobj178006* LOC37;
Ropeobj178006* LOC38;
Ropeobj178006** LOC39;
Ropeobj178006** LOC40;
Ropeobj178006** LOC41;
Ropeobj178006* LOC42;
Ropeobj178006* LOC50;
TY533289 LOC51;
TY178507 LOC52;
TY533289 LOC58;
initname0 = getinitname_562235_839829468((*m0).module);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = initname0;
prc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1);
/* Declare the type-node and nim-type arrays when used (section 12). */
{
TY532811 LOC6;
if (!(((NI) 0) < (*m0).typenodes)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = (*m0).typenodesname;
LOC6[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2);
}
LA4: ;
{
TY532811 LOC11;
if (!(((NI) 0) < (*m0).nimtypes)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*m0).nimtypesname;
LOC11[1] = rope_178401_2381377266(((NI64) ((*m0).nimtypes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2);
}
LA9: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = initgcframe_538435_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC12);
/* Proc section 0 of the three init procs. */
LOC13 = (Ropeobj178006*)0;
LOC13 = gensectionstart_530081_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC13);
LOC14 = (Ropeobj178006**)0;
LOC14 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC14));
LOC15 = (Ropeobj178006**)0;
LOC15 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC15));
LOC16 = (Ropeobj178006**)0;
LOC16 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = gensectionend_530116_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC17);
/* Stack-frame push when option bit 15 is active and codegen flag
 * bit 2 has not already marked the frame as emitted. */
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC20)) goto LA21;
LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 2))&7U)))!=0));
LA21: ;
if (!LOC20) goto LA22;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 2))%(sizeof(NU8)*8));
{
Ropeobj178006* procname0;
Ropeobj178006* LOC28;
Ropeobj178006* LOC29;
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0))) goto LA26;
procname0 = makecstring_191638_155036129((*(*(*m0).module).name).s);
LOC28 = (Ropeobj178006*)0;
LOC28 = quotedfilename_196818_155036129((*(*m0).module).info);
LOC29 = (Ropeobj178006*)0;
LOC29 = initframe_560140_839829468((*m0).initproc, procname0, LOC28);
add_178482_2381377266(&prc0, LOC29);
}
goto LA24;
LA26: ;
{
TY533289 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0);
add_178482_2381377266(&prc0, LOC32);
}
LA24: ;
}
LA22: ;
/* Proc section 1 of the three init procs. */
LOC33 = (Ropeobj178006*)0;
LOC33 = gensectionstart_530081_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC33);
LOC34 = (Ropeobj178006**)0;
LOC34 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC34));
LOC35 = (Ropeobj178006**)0;
LOC35 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC35));
LOC36 = (Ropeobj178006**)0;
LOC36 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC36));
LOC37 = (Ropeobj178006*)0;
LOC37 = gensectionend_530116_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC37);
/* Proc section 2 of the three init procs. */
LOC38 = (Ropeobj178006*)0;
LOC38 = gensectionstart_530081_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC38);
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC39));
LOC40 = (Ropeobj178006**)0;
LOC40 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC40));
LOC41 = (Ropeobj178006**)0;
LOC41 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = gensectionend_530116_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC42);
/* Matching stack-frame pop (same option bit 15 condition). */
{
NIM_BOOL LOC45;
Ropeobj178006* LOC49;
LOC45 = (NIM_BOOL)0;
LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC45)) goto LA46;
LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0));
LA46: ;
if (!LOC45) goto LA47;
LOC49 = (Ropeobj178006*)0;
LOC49 = deinitframe_560150_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC49);
}
LA47: ;
LOC50 = (Ropeobj178006*)0;
LOC50 = deinitgcframe_538441_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC50);
memset((void*)LOC51, 0, sizeof(LOC51));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0);
/* DatInit proc: header, then file sections 12..16. */
memset((void*)LOC52, 0, sizeof(LOC52));
LOC52[0] = getdatinitname_562239_839829468((*m0).module);
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1);
{
Tcfilesection529005 i_562401_839829468;
NI res_562482_839829468;
i_562401_839829468 = (Tcfilesection529005)0;
res_562482_839829468 = ((NI) 12);
{
while (1) {
Ropeobj178006* LOC56;
Ropeobj178006* LOC57;
if (!(res_562482_839829468 <= ((NI) 16))) goto LA55;
i_562401_839829468 = ((Tcfilesection529005) (res_562482_839829468));
LOC56 = (Ropeobj178006*)0;
LOC56 = gensectionstart_530015_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC56);
add_178482_2381377266(&prc0, (*m0).s[(i_562401_839829468)- 0]);
LOC57 = (Ropeobj178006*)0;
LOC57 = gensectionend_530050_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC57);
res_562482_839829468 += ((NI) 1);
} LA55: ;
}
}
memset((void*)LOC58, 0, sizeof(LOC58));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], prc0);
/* Extension loaders: iterate chars '0'..'9' (48..57) over a local
 * copy of m0.extensionloaders; non-nil entries become numbered
 * loader procs (template T839829468_684) in section 11. */
{
NIM_CHAR i_562442_839829468;
Ropeobj178006* el_562443_839829468;
TY529136 HEX3Atmp_562487_839829468;
NIM_CHAR i_562490_839829468;
i_562442_839829468 = (NIM_CHAR)0;
el_562443_839829468 = (Ropeobj178006*)0;
memset((void*)HEX3Atmp_562487_839829468, 0, sizeof(HEX3Atmp_562487_839829468));
memcpy((void*)HEX3Atmp_562487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_562487_839829468));
i_562490_839829468 = 48;
{
if (!((NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))) <= (NU8)(57))) goto LA62;
{
while (1) {
i_562442_839829468 = i_562490_839829468;
el_562443_839829468 = HEX3Atmp_562487_839829468[(((NU8)(i_562490_839829468)))- 48];
{
Ropeobj178006* ex0;
TY532811 LOC70;
if (!!((el_562443_839829468 == NIM_NIL))) goto LA68;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_562442_839829468)))) - ((NI) 48)))));
LOC70[1] = el_562443_839829468;
ex0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], ex0);
}
LA68: ;
{
if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))))) goto LA73;
goto LA64;
}
LA73: ;
i_562490_839829468 += ((NI) 1);
}
} LA64: ;
}
LA62: ;
}
}
/* Drains m0.typestack, emitting a C type description for each entry.
 * gettypedesc may push further types while running, so — exactly like
 * the original generated loop — the stack length is re-read on every
 * iteration. The returned rope is cached elsewhere and intentionally
 * unused here. */
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0) {
NI idx0 = ((NI) 0);
while (idx0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0)) {
Ropeobj178006* desc0 = gettypedesc_535673_839829468(m0, (*m0).typestack->data[idx0]);
(void)desc0;
idx0 += ((NI) 1);
}
}
/* Builds the generated-file banner for cfile0. Under global option
 * bit 4 only the short template (T839829468_685, version string
 * T839829468_686) is used; otherwise the verbose template
 * (T839829468_687) also records target OS, CPU, C compiler and the
 * exact compile command for this file. Restructured from the
 * generated goto chain into a plain if/else; behavior is unchanged. */
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* banner0 = (Ropeobj178006*)0;
if (((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 4))&63U)))!=0)) {
TY178507 shortargs0;
memset((void*)shortargs0, 0, sizeof(shortargs0));
shortargs0[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
banner0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_685), shortargs0, 1);
} else {
TY536475 longargs0;
NimStringDesc* cmd0;
memset((void*)longargs0, 0, sizeof(longargs0));
longargs0[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
longargs0[1] = rope_178277_2381377266(Os_176068_4151366050[(targetos_176629_4151366050)- 1].Field0);
longargs0[2] = rope_178277_2381377266(Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field0);
longargs0[3] = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field0);
cmd0 = getcompilecfilecmd_274284_2528170400(cfile0, NIM_FALSE);
longargs0[4] = rope_178277_2381377266(cmd0);
banner0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_687), longargs0, 5);
}
return banner0;
}
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0) {
NimStringDesc* LOC1;
TY178507 LOC2;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + 22);
appendString(LOC1, ((NimStringDesc*) &T839829468_688));
appendString(LOC1, tnl_176644_4151366050);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rope_178401_2381377266(((NI64) (Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field1)));
addf_179205_2381377266(result0, LOC1, LOC2, 1);
}
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getcopyright_561665_839829468(cfile0);
addinttypes_561659_839829468(&result0);
return result0;
}
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY178507 LOC13;
LOC3 = (NIM_BOOL)0;
LOC3 = !((nimtv_538656_839829468 == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag529025) 1))&7U)))!=0);
if (LOC5) goto LA6;
LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
{
Ttype292840* t_538761_839829468;
NI i_538768_839829468;
NI L_538770_839829468;
t_538761_839829468 = (Ttype292840*)0;
i_538768_839829468 = ((NI) 0);
L_538770_839829468 = (nimtvdeps_538674_839829468 ? nimtvdeps_538674_839829468->Sup.len : 0);
{
while (1) {
Ropeobj178006* LOC12;
if (!(i_538768_839829468 < L_538770_839829468)) goto LA11;
t_538761_839829468 = nimtvdeps_538674_839829468->data[i_538768_839829468];
LOC12 = (Ropeobj178006*)0;
LOC12 = gettypedesc_535673_839829468(m0, t_538761_839829468);
i_538768_839829468 += ((NI) 1);
} LA11: ;
}
}
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = nimtv_538656_839829468;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1);
}
LA7: ;
}
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0) {
NimStringDesc* LOC1;
Tstrentry147009* it0;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + tnl_176644_4151366050->Sup.len + 20);
appendString(LOC1, tnl_176644_4151366050);
appendString(LOC1, ((NimStringDesc*) &T839829468_690));
appendString(LOC1, tnl_176644_4151366050);
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC1);
it0 = ((Tstrentry147009*) ((*m0).headerfiles.head));
{
while (1) {
if (!!((it0 == NIM_NIL))) goto LA3;
{
NimStringDesc* LOC8;
NimStringDesc* LOC9;
Ropeobj178006* LOC10;
if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nsuReplaceChar((*it0).data, 96, 34);
LOC8 = rawNewString(LOC9->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC8, LOC9);
appendString(LOC8, tnl_176644_4151366050);
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(LOC8);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC10);
}
goto LA4;
LA6: ;
{
TY178507 LOC14;
if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1);
}
goto LA4;
LA12: ;
{
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1);
}
LA4: ;
it0 = ((Tstrentry147009*) ((*it0).Sup.next));
} LA3: ;
}
}
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
result0 = getfileheader_561683_839829468(cfile0);
LOC1 = (Ropeobj178006*)0;
LOC1 = genmergeinfo_530203_2760143328(m0);
add_178482_2381377266(&result0, LOC1);
generatethreadlocalstorage_538717_839829468(m0);
generateheaders_560104_839829468(m0);
{
Tcfilesection529005 i_562614_839829468;
NI res_562622_839829468;
i_562614_839829468 = (Tcfilesection529005)0;
res_562622_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC5;
Ropeobj178006* LOC6;
if (!(res_562622_839829468 <= ((NI) 10))) goto LA4;
i_562614_839829468 = ((Tcfilesection529005) (res_562622_839829468));
LOC5 = (Ropeobj178006*)0;
LOC5 = gensectionstart_530015_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC5);
add_178482_2381377266(&result0, (*m0).s[(i_562614_839829468)- 0]);
LOC6 = (Ropeobj178006*)0;
LOC6 = gensectionend_530050_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC6);
res_562622_839829468 += ((NI) 1);
} LA4: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
return result0;
}
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563201_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = mergerequired_530832_2760143328(m0);
if (!(LOC3)) goto LA4;
LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
LA5: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0) {
{
NimStringDesc* externc0;
TY178507 LOC12;
if (!!((nimtv_538656_839829468 == NIM_NIL))) goto LA3;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
externc0 = copyString(((NimStringDesc*) &T839829468_693));
}
goto LA5;
LA9: ;
{
externc0 = copyString(((NimStringDesc*) &T839829468_490));
}
LA5: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(externc0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1);
}
LA3: ;
}
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
result0 = NIM_TRUE;
{
NimStringDesc* objfile0;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0))) goto LA3;
objfile0 = toobjfile_273859_2528170400(cfile0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = writeropeifnotequal_179511_2381377266(code0, cfile0);
if (!LOC7) goto LA8;
goto BeforeRet;
}
LA8: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = nosexistsFile(objfile0);
if (!(LOC12)) goto LA13;
LOC12 = nosfileNewer(objfile0, cfile0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
}
LA14: ;
}
goto LA1;
LA3: ;
{
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
}
LA1: ;
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563201_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((*m0).Sup.fromcache);
if (LOC3) goto LA4;
LOC3 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA9;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], mainmodprocs_529148_3723162438);
generatethreadvarssize_538771_839829468(m0);
}
LA9: ;
code0 = genmodule_562491_839829468(m0, cfile0);
{
NIM_BOOL LOC13;
LOC13 = (NIM_BOOL)0;
LOC13 = shouldrecompile_563621_839829468(code0, cfile0);
if (!LOC13) goto LA14;
addfiletocompile_273863_2528170400(cfile0);
}
LA14: ;
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
Ropeobj178006* code0;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = pending0;
if (!(LOC18)) goto LA19;
LOC18 = mergerequired_530832_2760143328(m0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA20: ;
if (!LOC17) goto LA21;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA21: ;
{
NimStringDesc* LOC24;
NIM_BOOL LOC25;
LOC24 = (NimStringDesc*)0;
LOC24 = toobjfile_273859_2528170400(cfilenoext0);
LOC25 = (NIM_BOOL)0;
LOC25 = nosexistsFile(LOC24);
if (!!(LOC25)) goto LA26;
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA26: ;
LA1: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* guard0;
TY178507 LOC1;
TY128506 LOC2;
TY178507 LOC3;
TY533289 LOC13;
TY178507 LOC14;
result0 = getcopyright_561665_839829468((*m0).filename);
memset((void*)LOC1, 0, sizeof(LOC1));
memset((void*)(&LOC2), 0, sizeof(LOC2));
nossplitFile((*m0).filename, (&LOC2));
LOC1[0] = rope_178277_2381377266(LOC2.Field1);
guard0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1);
addinttypes_561659_839829468(&result0);
generateheaders_560104_839829468(m0);
generatethreadlocalstorage_538717_839829468(m0);
{
Tcfilesection529005 i_563171_839829468;
NI res_563197_839829468;
i_563171_839829468 = (Tcfilesection529005)0;
res_563197_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
if (!(res_563197_839829468 <= ((NI) 10))) goto LA6;
i_563171_839829468 = ((Tcfilesection529005) (res_563197_839829468));
LOC7 = (Ropeobj178006*)0;
LOC7 = gensectionstart_530015_2760143328(i_563171_839829468);
add_178482_2381377266(&result0, LOC7);
add_178482_2381377266(&result0, (*m0).s[(i_563171_839829468)- 0]);
LOC8 = (Ropeobj178006*)0;
LOC8 = gensectionend_530050_2760143328(i_563171_839829468);
add_178482_2381377266(&result0, LOC8);
res_563197_839829468 += ((NI) 1);
} LA6: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
LA11: ;
memset((void*)LOC13, 0, sizeof(LOC13));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1);
writerope_178836_2381377266(result0, (*m0).filename, NIM_FALSE);
}
N_NIMCALL(void, cgenwritemodules_563902_839829468)(void) {
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA3;
finishmodule_563420_839829468(generatedheader_532201_839829468);
}
LA3: ;
{
while (1) {
if (!(((NI) 0) < gforwardedprocscounter_529171_3723162438)) goto LA6;
{
Tcgen529027* m_563916_839829468;
m_563916_839829468 = (Tcgen529027*)0;
{
NI i_563935_839829468;
NI HEX3Atmp_563937_839829468;
NI res_563939_839829468;
i_563935_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563939_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563939_839829468 <= HEX3Atmp_563937_839829468)) goto LA10;
i_563935_839829468 = res_563939_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563935_839829468] == NIM_NIL))) goto LA13;
m_563916_839829468 = gmodules_529170_3723162438->data[i_563935_839829468];
{
if (!!((*m_563916_839829468).Sup.fromcache)) goto LA17;
finishmodule_563420_839829468(m_563916_839829468);
}
LA17: ;
}
LA13: ;
res_563939_839829468 += ((NI) 1);
} LA10: ;
}
}
}
} LA6: ;
}
{
Tcgen529027* m_563917_839829468;
m_563917_839829468 = (Tcgen529027*)0;
{
NI i_563946_839829468;
NI HEX3Atmp_563948_839829468;
NI res_563950_839829468;
i_563946_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563950_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563950_839829468 <= HEX3Atmp_563948_839829468)) goto LA22;
i_563946_839829468 = res_563950_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563946_839829468] == NIM_NIL))) goto LA25;
m_563917_839829468 = gmodules_529170_3723162438->data[i_563946_839829468];
{
if (!(*m_563917_839829468).Sup.fromcache) goto LA29;
updatecachedmodule_563813_839829468(m_563917_839829468);
}
goto LA27;
LA29: ;
{
writemodule_563637_839829468(m_563917_839829468, NIM_TRUE);
}
LA27: ;
}
LA25: ;
res_563950_839829468 += ((NI) 1);
} LA22: ;
}
}
}
writemapping_274789_2528170400(gmapping_529152_3723162438);
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA34;
writeheader_563149_839829468(generatedheader_532201_839829468);
}
LA34: ;
}
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0) {
{
Tcfilesection529005 i_562848_839829468;
NI res_562853_839829468;
i_562848_839829468 = (Tcfilesection529005)0;
res_562853_839829468 = ((NI) 0);
{
while (1) {
if (!(res_562853_839829468 <= ((NI) 17))) goto LA3;
i_562848_839829468 = ((Tcfilesection529005) (res_562853_839829468));
unsureAsgnRef((void**) (&arr0[(i_562848_839829468)- 0]), NIM_NIL);
res_562853_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0) {
{
NIM_CHAR i_563014_839829468;
NI res_563019_839829468;
i_563014_839829468 = (NIM_CHAR)0;
res_563019_839829468 = ((NI) 48);
{
while (1) {
if (!(res_563019_839829468 <= ((NI) 57))) goto LA3;
i_563014_839829468 = ((NIM_CHAR) (res_563019_839829468));
unsureAsgnRef((void**) (&arr0[(((NU8)(i_563014_839829468)))- 48]), NIM_NIL);
res_563019_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0) {
initlinkedlist_147031_3771138726((&(*m0).headerfiles));
initintset_268885_2627731572((&(*m0).declaredprotos));
initidtable_296019_850551059((&(*m0).forwtypecache));
asgnRef((void**) (&(*m0).initproc), newproc_529206_3723162438(NIM_NIL, m0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_562625_839829468(m0));
asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_562630_839829468(m0));
initnodetable_296085_850551059((&(*m0).datacache));
if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack);
(*m0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs);
(*m0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_533598_839829468(m0));
asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_533598_839829468(m0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
}
goto LA1;
LA3: ;
{
(*m0).flags &= ~(((NU8)1) << ((((Codegenflag529025) 0)) % (sizeof(NU8)*8)));
}
LA1: ;
nullify_562833_839829468((*m0).s);
(*m0).typenodes = ((NI) 0);
(*m0).nimtypes = ((NI) 0);
nullify_562858_839829468((*m0).extensionloaders);
(*m0).Sup.fromcache = NIM_TRUE;
}
N_NIMCALL(void, resetcgenmodules_563024_839829468)(void) {
{
Tcgen529027* m_563026_839829468;
m_563026_839829468 = (Tcgen529027*)0;
{
NI i_563031_839829468;
NI HEX3Atmp_563033_839829468;
NI res_563035_839829468;
i_563031_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563035_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563035_839829468 <= HEX3Atmp_563033_839829468)) goto LA4;
i_563031_839829468 = res_563035_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563031_839829468] == NIM_NIL))) goto LA7;
m_563026_839829468 = gmodules_529170_3723162438->data[i_563031_839829468];
resetmodule_562763_839829468(m_563026_839829468);
}
LA7: ;
res_563035_839829468 += ((NI) 1);
} LA4: ;
}
}
}
}
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) {
nimRegisterGlobalMarker(T839829468_2);
nimRegisterGlobalMarker(T839829468_3);
nimRegisterGlobalMarker(T839829468_5);
nimRegisterGlobalMarker(T839829468_6);
nimRegisterGlobalMarker(T839829468_7);
nimRegisterGlobalMarker(T839829468_8);
asgnRefNoCycle((void**) (&indent_532655_839829468), rope_178277_2381377266(((NimStringDesc*) &T839829468_4)));
if (nimtvdeps_538674_839829468) nimGCunrefNoCycle(nimtvdeps_538674_839829468);
nimtvdeps_538674_839829468 = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
chckNil((void*)(&nimtvdeclared_538675_839829468));
genericReset((void*)(&nimtvdeclared_538675_839829468), (&NTI268030));
initintset_268885_2627731572((&nimtvdeclared_538675_839829468));
breakpointid_548860_839829468 = ((NI) 0);
}
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) {
}
|
mpi_omp_hl.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <omp.h>
#include "papi.h"
#include "papi_test.h"
#include "do_loops.h"
int main( int argc, char **argv )
{
int retval, i;
int quiet = 0;
char* region_name;
int world_size, world_rank;
/* Set TESTS_QUIET variable */
quiet = tests_quiet( argc, argv );
MPI_Init( &argc, &argv );
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
region_name = "do_flops";
#pragma omp parallel
#pragma omp for
for ( i = 1; i <= 2; ++i ) {
int tid;
tid = omp_get_thread_num();
if ( !quiet ) {
printf("\nRank %d, Thread %d: instrument flops\n", world_rank, tid);
}
retval = PAPI_hl_region_begin(region_name);
if ( retval != PAPI_OK ) {
test_fail( __FILE__, __LINE__, "PAPI_hl_region_begin", retval );
}
do_flops( NUM_FLOPS );
retval = PAPI_hl_region_end(region_name);
if ( retval != PAPI_OK ) {
test_fail( __FILE__, __LINE__, "PAPI_hl_region_end", retval );
}
}
MPI_Finalize();
test_hl_pass( __FILE__ );
return 0;
} |
ast-dump-openmp-target-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target parallel for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:4:1, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:10:1, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:17:1, col:49>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:24:1, col:49>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetParallelForSimdDirective {{.*}} <line:31:1, col:49>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:47> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
zoom.c | // This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.
#ifndef ZOOM_C
#define ZOOM_C
#include <string.h>
#include "xmalloc.c"
#include "mask.c"
#include "bicubic_interpolation.c"
#define ZOOM_SIGMA_ZERO 0.6
/**
*
* Compute the size of a zoomed image from the zoom factor
*
**/
void zoom_size(
	int nx,       // width of the original image
	int ny,       // height of the original image
	int *nxx,     // output: width of the zoomed image
	int *nyy,     // output: height of the zoomed image
	float factor  // zoom factor between 0 and 1
)
{
	// Scale each dimension by the zoom factor and round to the nearest
	// integer by adding 0.5 before truncating.
	const float w = (float) nx * factor;
	const float h = (float) ny * factor;
	*nxx = (int) (w + 0.5);
	*nyy = (int) (h + 0.5);
}
/**
*
* Downsample an image
*
**/
void zoom_out(
	const float *I,    // input image
	float *Iout,       // output image
	const int nx,      // image width
	const int ny,      // image height
	const float factor // zoom factor between 0 and 1
)
{
	// working copy of the input, so it can be smoothed in place
	// (idiom fix: bulk memcpy instead of an element-by-element loop)
	float *Is = xmalloc(nx * ny * sizeof*Is);
	memcpy(Is, I, nx * ny * sizeof*Is);

	// compute the size of the zoomed image
	int nxx, nyy;
	zoom_size(nx, ny, &nxx, &nyy, factor);

	// Gaussian sigma for pre-smoothing, derived from ZOOM_SIGMA_ZERO.
	// NOTE(review): the radicand is only non-negative for factor <= 1;
	// callers are expected to pass 0 < factor <= 1 per the header comment.
	const float sigma = ZOOM_SIGMA_ZERO * sqrt(1.0/(factor*factor) - 1.0);

	// pre-smooth the image (skip near-zero sigma)
	if(sigma>0.2) //added by mdelbra to handle sigma=0 CHECK
		gaussian(Is, nx, ny, sigma);

	// re-sample the image using bicubic interpolation
#pragma omp parallel for
	for (int i1 = 0; i1 < nyy; i1++)
	for (int j1 = 0; j1 < nxx; j1++)
	{
		// corresponding (sub-pixel) position in the original image
		const float i2 = (float) i1 / factor;
		const float j2 = (float) j1 / factor;
		float g = bicubic_interpolation_at(Is, j2, i2, nx, ny, false);
		Iout[i1 * nxx + j1] = g;
	}

	free(Is);
}
/**
*
* Function to upsample the image
*
**/
void zoom_in(
	const float *I,  // input image
	float *Iout,     // output image
	int nx,          // width of the original image
	int ny,          // height of the original image
	int nxx,         // width of the zoomed image
	int nyy          // height of the zoomed image
)
{
	// per-axis zoom factors
	const float fx = ((float)nxx / nx);
	const float fy = ((float)nyy / ny);

	// re-sample the image using bicubic interpolation
#pragma omp parallel for
	for (int r = 0; r < nyy; r++)
		for (int c = 0; c < nxx; c++)
		{
			// corresponding (sub-pixel) position in the original image
			float y = (float) r / fy;
			float x = (float) c / fx;
			float g = bicubic_interpolation_at(I, x, y, nx, ny, false);
			Iout[r * nxx + c] = g;
		}
}
#endif//ZOOM_C
|
GB_unaryop__lnot_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_int8
// op(A') function: GB_tran__lnot_uint32_int8
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for every entry p: each int8_t aij is cast to
// uint32_t and the logical-not cij = !(aij != 0) is applied (via GB_CAST_OP).
GrB_Info GB_unop__lnot_uint32_int8
(
    uint32_t *restrict Cx,      // output array, length anz
    const int8_t *restrict Ax,  // input array, length anz
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose and per-entry apply are both done by the
// included GB_unaryop_transpose.c template (phase 2 of 2).
GrB_Info GB_tran__lnot_uint32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,        // workspace used by the transpose template
    GBI_single_iterator Iter,   // iterator over the slices of A
    const int64_t *restrict A_slice,
    int naslice                 // number of slices of A
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
thread_thread_scheduling.c | // RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include "bolt_scheduling_util.h"
/*
 * Exercise scheduling of doubly-nested parallel regions on 4 ESs.
 * Phase 1: a nested "parallel" inside "parallel"; phase 2: a nested
 * "parallel for" inside "parallel for".  In each phase the inner thread
 * whose id equals its parent's id blocks on a timeout barrier (see
 * bolt_scheduling_util.h for the exact wait semantics), so the runtime
 * must keep scheduling the remaining inner threads for the barrier to
 * complete.  Every (parent, child) cell of vals gets +1 in phase 1 and
 * +2 in phase 2; returns 1 iff all cells end up exactly 3, else 0.
 */
int test_thread_thread_scheduling(int num_threads) {
  int i, vals[num_threads * num_threads];
  memset(vals, 0, sizeof(int) * num_threads * num_threads);
  omp_set_max_active_levels(2);  /* allow two levels of nested parallelism */
  timeout_barrier_t barrier;
  timeout_barrier_init(&barrier);
#pragma omp parallel num_threads(num_threads)
  {
    check_num_ess(4);
    int parent_tid = omp_get_thread_num();
#pragma omp parallel num_threads(num_threads)
    {
      /* the "diagonal" inner thread waits on the barrier */
      if (parent_tid == omp_get_thread_num()) {
        timeout_barrier_wait(&barrier, 4);
      }
      vals[parent_tid * num_threads + omp_get_thread_num()] += 1;
    }
  }
#pragma omp parallel for num_threads(num_threads)
  for (i = 0; i < num_threads; i++) {
    check_num_ess(4);
    int j, parent_i = i;
#pragma omp parallel for num_threads(num_threads)
    for (j = 0; j < num_threads; j++) {
      /* again, the diagonal iteration waits on the barrier */
      if (parent_i == j) {
        timeout_barrier_wait(&barrier, 4);
      }
      vals[parent_i * num_threads + j] += 2;
    }
  }
  /* every cell must have been touched by both phases exactly once */
  for (i = 0; i < num_threads * num_threads; i++) {
    if (vals[i] != 3) {
      printf("vals[%d] == %d\n", i, vals[i]);
      return 0;
    }
  }
  return 1;
}
/* Run the scheduling test with 4 and 8 threads; exit code = #failures. */
int main() {
  int num_failed = 0;
  int trial;
  for (trial = 1; trial < 3; trial++) {
    if (!test_thread_thread_scheduling(trial * 4))
      num_failed++;
  }
  return num_failed;
}
|
rawSHA512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
* No copyright is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2011 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_raw0_SHA512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_raw0_SHA512);
#else
#include <stdint.h>
#include "arch.h"
#include "sha2.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "rawSHA512_common.h"
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA512"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#ifdef SIMD_COEF_64
#define PLAINTEXT_LENGTH 111
#else
#define PLAINTEXT_LENGTH 125
#endif
#define BINARY_SIZE 8
#define SALT_SIZE 0
#define SALT_ALIGN 1
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_64
#define FMT_IS_64BIT
#define FMT_IS_BE
#include "common-simd-getpos.h"
static uint64_t (*saved_key);
static uint64_t (*crypt_out);
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint64_t (*crypt_out)[DIGEST_SIZE / sizeof(uint64_t)];
#endif
/*
 * One-time allocation of the key and result buffers.  Under OpenMP the
 * batch sizes are first scaled by the thread count, then by OMP_SCALE.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_64
	/* scalar build: one key string, length and digest per candidate */
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
#else
	/* SIMD build: flat, SIMD-aligned buffers (interleaved lane layout) */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt * SHA_BUF_SIZ,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt * 8,
	                             sizeof(*crypt_out), MEM_ALIGN_SIMD);
#endif
}
/* Release the per-run buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_64
	/* saved_len only exists in the scalar build */
	MEM_FREE(saved_len);
#endif
}
/*
 * Decode the hex digest that follows the format tag into a static, aligned
 * binary buffer.  On SIMD builds the bytes are additionally swapped to
 * big-endian 64-bit words and (with REVERSE_STEPS) the final SHA-512 steps
 * are reversed, so the result is directly comparable to crypt_out.
 */
static void *get_binary(char *ciphertext)
{
	static uint64_t *outw;
	unsigned char *out;
	char *p;
	int i;

	/* allocate the static buffer on first use */
	if (!outw)
		outw = mem_calloc_tiny(DIGEST_SIZE, BINARY_ALIGN);

	out = (unsigned char*)outw;
	p = ciphertext + TAG_LENGTH;
	/* two hex digits per output byte */
	for (i = 0; i < DIGEST_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
#ifdef SIMD_COEF_64
#if ARCH_LITTLE_ENDIAN==1
	alter_endianity_to_BE64(out, DIGEST_SIZE/8);
#endif
#ifdef REVERSE_STEPS
	sha512_reverse(outw);
#endif
#endif
	return out;
}
#ifdef SIMD_COEF_64
/*
 * Index of the first 64-bit word of hash number 'index' inside the
 * interleaved SIMD output buffer: lane (index mod SIMD_COEF_64) within
 * group (index / SIMD_COEF_64), 8 words per hash.
 */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
/* scalar build: hash on the low bits of the first 64-bit output word */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
/* Bucket the decoded binary by the low bits of its first 64-bit word,
 * mirroring how get_hash_*() bucket the computed hashes. */
static int binary_hash_0(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_6; }
#define NON_SIMD_SET_SAVED_LEN
#include "common-simd-setkey64.h"
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
/*
 * Compute SHA-512 for every queued key: one SIMD batch per loop iteration
 * (or one key in the scalar build).  Without OpenMP, the loop is compiled
 * out — that is safe because init() only scales max_keys_per_crypt above
 * MAX_KEYS_PER_CRYPT when _OPENMP is defined, so a single pass covers count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		/* keys are presumably already in interleaved SIMD layout
		   (set_key from common-simd-setkey64.h — confirm there) */
		SIMDSHA512body(&saved_key[index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64],
		               &crypt_out[index/SIMD_COEF_64*8*SIMD_COEF_64],
		               NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
		SHA512_CTX ctx;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, saved_key[index], saved_len[index]);
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
	return count;
}
/* Return 1 if any computed hash's first 64-bit word matches the binary. */
static int cmp_all(void *binary, int count)
{
	unsigned int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((uint64_t*)binary)[0] == crypt_out[HASH_IDX])
#else
		if ( ((uint64_t*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}
/* Compare only the first 64-bit word; cmp_exact() does the full check. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	return ((uint64_t*)binary)[0] == crypt_out[HASH_IDX];
#else
	return *(uint64_t*)binary == crypt_out[index][0];
#endif
}
/*
 * Full verification: since cmp_one() only compares 64 bits, recompute the
 * whole SHA-512 of the candidate key and compare the entire digest against
 * the decoded ciphertext.  The local crypt_out deliberately shadows the
 * global buffer.
 */
static int cmp_exact(char *source, int index)
{
	uint64_t *binary = get_binary(source);
	char *key = get_key(index);
	SHA512_CTX ctx;
	uint64_t crypt_out[DIGEST_SIZE / sizeof(uint64_t)];

	SHA512_Init(&ctx);
	SHA512_Update(&ctx, key, strlen(key));
	SHA512_Final((unsigned char*)crypt_out, &ctx);
#ifdef SIMD_COEF_64
#if ARCH_LITTLE_ENDIAN==1
	/* apply the same transforms get_binary() applied, so memcmp matches */
	alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8);
#endif
#ifdef REVERSE_STEPS
	sha512_reverse(crypt_out);
#endif
#endif
	return !memcmp(binary, crypt_out, DIGEST_SIZE);
}
/*
* The '0_' makes sure this format registers before others,
* if ambiguous. Do not copy it for other formats.
*/
struct fmt_main fmt_raw0_SHA512 = {
	{ /* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		"SHA512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
		FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ /* ciphertext tags this format accepts */
			FORMAT_TAG,
			XSHA512_FORMAT_TAG,
			NSLDAP_FORMAT_TAG
		},
		sha512_common_tests_rawsha512_111
	}, { /* format methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		sha512_common_valid,
		sha512_common_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{ /* binary_hash functions, by bucket size */
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash functions, by bucket size */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__lt_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int16)
// A*D function (colscale): GB (_AxD__lt_int16)
// D*A function (rowscale): GB (_DxB__lt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int16)
// C=scalar+B GB (_bind1st__lt_int16)
// C=scalar+B' GB (_bind1st_tran__lt_int16)
// C=A+scalar GB (_bind2nd__lt_int16)
// C=A'+scalar GB (_bind2nd_tran__lt_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT16 || GxB_NO_LT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop lives in the template.
void GB (_Cdense_ewise3_noaccum__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix.  The template is
// compiled out (#if 0) for this generated operator, so when enabled this
// function returns GrB_SUCCESS without touching C.
GrB_Info GB (_Cdense_accumB__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix.  As with accumB above,
// the template is compiled out (#if 0) for this generated operator.
GrB_Info GB (_Cdense_accumb__lt_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, of type int16_t
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with a diagonal matrix D.
GrB_Info GB (_AxD__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template writes the scaled values directly into C->x
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with a diagonal matrix D.
GrB_Info GB (_DxB__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template writes the scaled values directly into C->x
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  For eWiseUnion the alpha/beta
// scalars are typecast to int16_t before the included GB_add_template.c
// performs the numeric work.
GrB_Info GB (_AaddB__lt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typecast the alpha/beta scalars for the template
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper;
// the numeric phase is done by the included GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__lt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP selects between the flipped and non-flipped templates at
// compile time; LT has a flipped variant handled elsewhere, so flipxy is
// ignored here when GB_BINOP_FLIP is 0.
GrB_Info GB (_AemultB_02__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_04__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__lt_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every present entry p, with the scalar x bound
// as the first operand of the LT operator.
GrB_Info GB (_bind1st__lt_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // scalar x, of type int16_t
    const GB_void *Bx_input,    // values of B
    const int8_t *restrict Bb,  // bitmap of B (GBB handles the NULL case)
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present per the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every present entry p, with the scalar y bound
// as the second operand of the LT operator.
GrB_Info GB (_bind2nd__lt_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // scalar y, of type int16_t
    const int8_t *restrict Ab,  // bitmap of A (GBB handles the NULL case)
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x < aij) via GB_CAST_OP above.
GrB_Info GB (_bind1st_tran__lt_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,     // scalar x, of type int16_t
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE (a no-op for this instance since both definitions
    // are int16_t, but kept for symmetry with the generator)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij < y) via GB_CAST_OP above.
GrB_Info GB (_bind2nd_tran__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // scalar y, of type int16_t
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__expm1_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__expm1_fc32_fc32
// op(A') function: GB_unop_tran__expm1_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexpm1f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexpm1f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cexpm1f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_cexpm1f (Ax [p]) for every entry, in both the full case
// (Ab == NULL) and the bitmap case.
GrB_Info GB_unop_apply__expm1_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity-op fast path (compiled out: this operator is not identity)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexpm1f (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexpm1f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply, all done by the
// included GB_unop_transpose.c template.
GrB_Info GB_unop_tran__expm1_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // workspace for the template
    const int64_t *GB_RESTRICT A_slice, // slicing of A across threads
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
pwsafe_fmt_plug.c | /* Password Safe and Password Gorilla cracker patch for JtR. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Optimization patch during January of 2013 by Brian Wallace <brian.wallace9809 at gmail.com>.
*
* This software is Copyright (c) 2012-2013
* Dhiru Kholia <dhiru.kholia at gmail.com> and Brian Wallace <brian.wallace9809 at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pwsafe;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pwsafe);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 1 // tuned on core i7
#endif
#include "memdbg.h"
#define FORMAT_LABEL "pwsafe"
#define FORMAT_NAME "Password Safe"
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: "$pwsafe$*version*salt(64 hex)*iterations*hash(64 hex)" */
static struct fmt_tests pwsafe_tests[] = {
{"$pwsafe$*3*fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521*2048*88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", "12345678"},
{"$pwsafe$*3*581cd1135b9b993ccb0f6b01c1fcfacd799c69960496c96286f94fe1400c1b25*2048*4ab3c2d3af251e94eb2f753fdf30fb9da074bec6bac0fa9d9d152b95fc5795c6", "openwall"},
{"$pwsafe$*3*34ba0066d0fc594c126b60b9db98b6024e1cf585901b81b5b005ce386f173d4c*2048*cc86f1a5d930ff19b3602770a86586b5d9dea7bb657012aca875aa2a7dc71dc0", "12345678901234567890123"},
{"$pwsafe$*3*a42431191707895fb8d1121a3a6e255e33892d8eecb50fc616adab6185b5affb*2048*0f71d12df2b7c5394ae90771f6475a7ad0437007a8eeb5d9b58e35d8fd57c827", "123456789012345678901234567"},
{"$pwsafe$*3*c380dee0dbb536f5454f78603b020be76b33e294e9c2a0e047f43b9c61669fc8*2048*e88ed54a85e419d555be219d200563ae3ba864e24442826f412867fc0403917d", "this is an 87 character password to test the max bound of pwsafe-opencl................"},
{NULL}
};
/* Per-candidate plaintext buffers and the computed SHA-256 output per key. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Decoded salt: format version, key-stretch iteration count, 32 raw bytes. */
static struct custom_salt {
int version;
unsigned int iterations;
char unsigned salt[32];
} *cur_salt;
/* One-time setup: allocate key/output arrays. With OpenMP, min_keys is
 * scaled by the thread count and max_keys additionally by OMP_SCALE, so
 * each crypt_all() call carries enough work per thread. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/*
 * Validate a candidate hash line of the form:
 *   $pwsafe$*version*salt*iterations*hash
 * Returns 1 if the line is well-formed, 0 otherwise.
 *
 * Improvement over the previous version: the salt and hash fields are now
 * required to be hexadecimal and the iteration field to be all digits.
 * Previously a non-hex salt/hash was accepted here and then silently
 * decoded to garbage bytes by get_salt()/get_binary().
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	// format $pwsafe$version*salt*iterations*hash
	char *p;
	char *ctcopy;
	char *keeptr;
	if (strncmp(ciphertext, "$pwsafe$*", 9) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 9;	/* skip over "$pwsafe$*" */
	if ((p = strtok(ctcopy, "*")) == NULL)	/* version */
		goto err;
	if (atoi(p) == 0)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (strlen(p) < 64)
		goto err;
	if (strspn(p, "0123456789abcdefABCDEF") != strlen(p))	/* hex only */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (strspn(p, "0123456789") != strlen(p))	/* digits only */
		goto err;
	if (atoi(p) == 0)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* hash */
		goto err;
	if (strlen(p) != 64)
		goto err;
	if (strspn(p, "0123456789abcdefABCDEF") != 64)	/* hex only */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Decode the salt portion of a validated ciphertext into a static
 * custom_salt (version, 32 raw salt bytes, iteration count). The returned
 * pointer refers to static storage and is overwritten on the next call. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *copy = strdup(ciphertext);
	char *base = copy;
	char *field;
	int i;

	copy += 9;	/* step past "$pwsafe$*" */
	field = strtok(copy, "*");	/* version */
	cs.version = atoi(field);
	field = strtok(NULL, "*");	/* 64 hex chars -> 32 salt bytes */
	for (i = 0; i < 32; i++) {
		cs.salt[i] = atoi16[ARCH_INDEX(field[2 * i])] * 16
			+ atoi16[ARCH_INDEX(field[2 * i + 1])];
	}
	field = strtok(NULL, "*");	/* iteration count */
	cs.iterations = (unsigned int)atoi(field);
	MEM_FREE(base);
	return (void *)&cs;
}
/* Decode the trailing 64-hex-char hash field into 32 raw bytes.
 * Returns a pointer to static storage (overwritten on the next call). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;	/* force word alignment */
	} buf;
	char *q = strrchr(ciphertext, '*') + 1;	/* last '*' starts the hash */
	int i;

	for (i = 0; i < BINARY_SIZE; i++, q += 2) {
		buf.c[i] = (atoi16[ARCH_INDEX(q[0])] << 4) |
		            atoi16[ARCH_INDEX(q[1])];
	}
	return buf.c;
}
/* Partial-hash accessors: successively wider low-bit masks of the first
 * output word, used by the cracker's hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* 32-bit rotates and the SHA-256 round helpers (FIPS 180-4: Ch, Maj,
 * Sigma0/Sigma1, sigma0/sigma1). All macro parameters are now fully
 * parenthesized (CERT PRE01-C); the previous definitions mis-expanded for
 * compound arguments such as rotr(a|b,1). Current call sites pass simple
 * variables, so generated code is unchanged. */
#define rotl(x,y) ( (x)<<(y) | (x)>>(32-(y)) )
#define rotr(x,y) ( (x)>>(y) | (x)<<(32-(y)) )
#define CHOICE(x,y,z) ( (z) ^ ((x) & ( (y) ^ (z))) )
#define MAJORITY(x,y,z) ( ((x) & (y)) | ((z) & ((x) | (y))) )
#define ROTXOR1(x) (rotr(x,2) ^ rotr(x,13) ^ rotr(x,22))
#define ROTXOR2(x) (rotr(x,6) ^ rotr(x,11) ^ rotr(x,25))
#define ROTXOR3(x) (rotr(x,7) ^ rotr(x,18) ^ ((x)>>3))
#define ROTXOR4(x) (rotr(x,17) ^ rotr(x,19) ^ ((x)>>10))
#if ARCH_LITTLE_ENDIAN
/* swap to big-endian byte order on little-endian hosts */
#define bytereverse(x) ( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )
#else
#define bytereverse(x) (x)
#endif
/*
 * Fully unrolled SHA-256 used for pwsafe key stretching. Each pass treats
 * the current 32-byte state as the message of one SHA-256 block (padded
 * with 0x80000000, zeros, and a 256-bit length word) and compresses it
 * with the standard initial hash value, i.e. state = SHA256(state).
 * The loop runs iterations+1 times; on exit the eight state words are
 * byte-swapped so the caller can memcpy them out as digest bytes.
 */
static void pwsafe_sha256_iterate(unsigned int * state, unsigned int iterations)
{
unsigned int word00,word01,word02,word03,word04,word05,word06,word07;
unsigned int word08,word09,word10,word11,word12,word13,word14,word15;
unsigned int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
iterations++; /* one extra pass on top of the caller's count */
word00 = state[0];
word01 = state[1];
word02 = state[2];
word03 = state[3];
word04 = state[4];
word05 = state[5];
word06 = state[6];
word07 = state[7];
while(iterations)
{
iterations--;
/* working variables a..h start at the SHA-256 initial hash value */
temp0 = 0x6a09e667UL;
temp1 = 0xbb67ae85UL;
temp2 = 0x3c6ef372UL;
temp3 = 0xa54ff53aUL;
temp4 = 0x510e527fUL;
temp5 = 0x9b05688cUL;
temp6 = 0x1f83d9abUL;
temp7 = 0x5be0cd19UL;
/* rounds 0-7: the eight state words are the message */
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x428a2f98 + (word00);
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x71374491 + (word01);
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb5c0fbcf + (word02);
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xe9b5dba5 + (word03);
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x3956c25b + (word04);
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x59f111f1 + (word05);
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x923f82a4 + (word06);
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xab1c5ed5 + (word07);
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
/* rounds 8-15: fixed padding (0x80, zeros) and the 256-bit length word */
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xd807aa98 + ( (word08 = 0x80000000U) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x12835b01 + ( (word09 = 0) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x243185be + ( (word10 = 0) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x550c7dc3 + ( (word11 = 0) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x72be5d74 + ( (word12 = 0) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x80deb1fe + ( (word13 = 0) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x9bdc06a7 + ( (word14 = 0) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc19bf174 + ( (word15 = 256) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
/* rounds 16-63: message-schedule expansion interleaved with the rounds */
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xe49b69c1 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xefbe4786 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x0fc19dc6 + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x240ca1cc + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x2de92c6f + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4a7484aa + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5cb0a9dc + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x76f988da + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x983e5152 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa831c66d + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb00327c8 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xbf597fc7 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xc6e00bf3 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd5a79147 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x06ca6351 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x14292967 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x27b70a85 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x2e1b2138 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x4d2c6dfc + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x53380d13 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x650a7354 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x766a0abb + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x81c2c92e + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x92722c85 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xa2bfe8a1 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa81a664b + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xc24b8b70 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xc76c51a3 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xd192e819 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd6990624 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xf40e3585 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x106aa070 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x19a4c116 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x1e376c08 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x2748774c + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x34b0bcb5 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x391c0cb3 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4ed8aa4a + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5b9cca4f + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x682e6ff3 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x748f82ee + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x78a5636f + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x84c87814 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x8cc70208 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x90befffa + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xa4506ceb + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xbef9a3f7 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc67178f2 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
/* feed-forward: add the initial hash value; result is the next state */
word00 = 0x6a09e667UL + temp0;
word01 = 0xbb67ae85UL + temp1;
word02 = 0x3c6ef372UL + temp2;
word03 = 0xa54ff53aUL + temp3;
word04 = 0x510e527fUL + temp4;
word05 = 0x9b05688cUL + temp5;
word06 = 0x1f83d9abUL + temp6;
word07 = 0x5be0cd19UL + temp7;
}
/* export in big-endian byte order (bytereverse is a no-op on big-endian) */
state[0] = bytereverse(word00);
state[1] = bytereverse(word01);
state[2] = bytereverse(word02);
state[3] = bytereverse(word03);
state[4] = bytereverse(word04);
state[5] = bytereverse(word05);
state[6] = bytereverse(word06);
state[7] = bytereverse(word07);
}
/* Compute SHA256(password || salt) for each queued key, then run the
 * iterated SHA-256 stretching directly on the context's state words.
 * NOTE(review): this relies on SHA256_Final leaving the final state in
 * ctx.h (or ctx.hash under CommonCrypto) — confirm against the bundled
 * sha2.h implementation. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA256_Update(&ctx, cur_salt->salt, 32);
SHA256_Final((unsigned char*)crypt_out[index], &ctx);
#ifdef COMMON_DIGEST_FOR_OPENSSL
/* CommonCrypto names the state array "hash" instead of "h" */
pwsafe_sha256_iterate(ctx.hash, cur_salt->iterations);
memcpy(crypt_out[index], ctx.hash, 32);
#else
pwsafe_sha256_iterate(ctx.h, cur_salt->iterations);
memcpy(crypt_out[index], ctx.h, 32);
#endif
}
return count;
}
/* Does any computed hash in this batch match the target binary?
 * Without OpenMP the batch size is fixed at 1 (MIN/MAX_KEYS_PER_CRYPT and
 * init() only scale it under _OPENMP), so only index 0 is checked. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
/* Exact 32-byte comparison of one computed hash against the target. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* The full digest is already compared in cmp_one(), so nothing is left
 * to verify here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH bytes and
 * always NUL-terminated. */
static void pwsafe_set_key(char *key, int index)
{
	int len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored candidate password for the given slot. */
static char *get_key(int index)
{
return saved_key[index];
}
#if FMT_MAIN_VERSION > 11
/* Report the salt's key-stretch iteration count (tunable-cost hook). */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *cs = salt;

	return (unsigned int)cs->iterations;
}
#endif
/* Format descriptor wiring the parameters and methods above into JtR. */
struct fmt_main fmt_pwsafe = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{
"iteration count",
},
#endif
pwsafe_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{
iteration_count,
},
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
pwsafe_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_binop__second_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fc64)
// A*D function (colscale): GB (_AxD__second_fc64)
// D*A function (rowscale): GB (_DxB__second_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 1
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC64 || GxB_NO_SECOND_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub (#if 0): the SECOND operator is not in the accumulator set
// listed below, so no dense ewise3-accum kernel is generated for it.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; loop supplied by the template.
void GB (_Cdense_ewise3_noaccum__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into dense C (template subassign 23).
GrB_Info GB (_Cdense_accumB__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (template subassign 22).
GrB_Info GB (_Cdense_accumb__second_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable — the return inside the braces above always fires
// first; harmless auto-generated artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D (template colscale).
GrB_Info GB (_AxD__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D (template rowscale).
GrB_Info GB (_DxB__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B with optional mask; the alpha/beta scalars
// are only read in the eWiseUnion case. Loops come from GB_add_template.c.
GrB_Info GB (_AaddB__second_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
GrB_Info GB (_AemultB_08__second_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full. GB_BINOP_FLIP is 0
// for SECOND, so only the unflipped template branch is compiled here.
GrB_Info GB (_AemultB_02__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): mask M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__second_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Disabled stub (#if 0): bind1st is not generated for SECOND, since
// z = second(x, bij) ignores the bound scalar x entirely.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Dead code: no bind2nd kernel is generated for this operator (the name is
// "(none)"), so the entire definition is compiled out with #if 0.
// If it were enabled, the loop would compute Cx [p] = y for every present
// entry; Ax is never read (the "; ;" is the generator's no-op placeholder
// for the aij extraction), consistent with z = second(x,y).
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Dead code: transpose-with-bind1st is not generated for this operator
// (name "(none)"); the whole definition is compiled out with #if 0.
// The GB_CAST_OP below would copy aij through unchanged (the bound scalar
// x is unused by SECOND).
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Dead code: transpose-with-bind2nd is not generated for this operator
// (name "(none)"); the whole definition is compiled out with #if 0.
// The GB_CAST_OP below would store the bound scalar y into every entry
// (aij is never extracted -- the "; ;" is the generator's placeholder),
// consistent with z = second(x,y).
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * Returns 1 when the difference is negative, 0 otherwise.  On return,
 * result->tv_usec is guaranteed non-negative.
 *
 * NOTE: this deliberately normalizes (mutates) the caller's *y while
 * performing the carry, exactly like the classic GNU libc example. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into *y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Push excess microseconds in the difference back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the microsecond part cannot underflow. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* The difference is negative exactly when x's seconds fell behind. */
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs the PLUTO-tiled order-2, 3D 25-point stencil
 * TESTS times on an (Nx+8) x (Ny+8) x (Nz+8) grid for Nt timesteps and
 * reports per-run wall-clock time.
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized (UB) when fewer than 4 command
 *    line arguments were given; now the program exits with a usage message.
 *  - roc2 was first malloc'd as a tiny placeholder and then the pointer was
 *    overwritten, leaking the placeholder; it is now allocated once.
 *  - The init loops started at 1 and never touched A[1], so the stencil
 *    (which reads offsets down to index 0 and reads A[(t5+1)%2] before
 *    writing it) consumed indeterminate values; both planes and the full
 *    index range are now initialized.
 *  - A and tile_size were never freed.
 */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four problem dimensions are required. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+8;   /* +8 = 4-deep halo on each side of the domain */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  /* Two time planes A[0]/A[1] plus the coefficient field roc2. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  /* Tile size information, terminated by -1 (extra element decides the
   * list length).  The list is modified here before source-to-source
   * transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 32;
  tile_size[4] = -1;

  /* For timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  /* Initialize both time planes and the coefficients with reproducible
   * pseudo-random data (seed fixed so runs are comparable). */
  const int BASE = 1024;
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-4 central-difference coefficients: center point plus 4 rings. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(4*t3+Nx-9,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),8*t4+6);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                    lbv=max(32*t4,4*t5+4);
                    ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (including A and tile_size, previously leaked). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  (void) ts_return;  /* timing-sign flag; kept for parity with other drivers */
  return 0;
}
|
host_varfn_function.c | #include <stdio.h>
#include <omp.h>
#include <hostrpc.h>
// This user variable function returns a uint so declare function
// as hostrpc_varfn_uint_t .
hostrpc_varfn_uint_t my3argfn;
hostrpc_varfn_double_t mydoublefn;
// This is an arbitrary 3 arg function
// Arbitrary three-argument variadic handler used as a hostrpc callback:
// consumes (int *, int, int) from the va_list, logs what it received,
// and returns the sum of the two trailing ints.
uint my3argfn(void * fnptr, ...) {
  va_list ap;
  va_start(ap, fnptr);
  int *vec = va_arg(ap, int *);
  int lhs = va_arg(ap, int);
  int rhs = va_arg(ap, int);
  va_end(ap);
  printf(" INSIDE my3argfn: fnptr:%p &a:%p int arg2:%d int arg3:%d \n", fnptr, vec, lhs, rhs);
  return lhs + rhs;
}
// This is an arbitrary 3 arg function
// Arbitrary three-argument variadic handler used as a hostrpc callback:
// consumes (int *, int, int) from the va_list, logs what it received,
// and returns (arg2 + arg3) * 1.1 as a double.
double mydoublefn(void * fnptr, ...) {
  va_list ap;
  va_start(ap, fnptr);
  int *vec = va_arg(ap, int *);
  int lhs = va_arg(ap, int);
  int rhs = va_arg(ap, int);
  va_end(ap);
  double result = (double) (lhs + rhs) * 1.1;
  printf(" INSIDE mydoublefn: fnptr:%p &a:%p int arg2:%d int arg3:%d rc:%f \n", fnptr, vec, lhs, rhs, result);
  return result;
}
// Test driver for hostrpc variadic-function callbacks.  Exercises three
// call paths: (1) direct host calls through function pointers, (2) the
// host fallback of hostrpc_varfn_{uint,double}, and (3) hostrpc calls made
// from inside an OpenMP target region.  Finally verifies that the target
// loop copied b[] into a[].
int main()
{
int N = 10;
int a[N];
int b[N];
int i;
// a[] is the output (filled in the target region); b[] holds 0..N-1
for (i=0; i<N; i++){
a[i]=0;
b[i]=i;
}
hostrpc_varfn_uint_t * my_host_fn_ptr;
my_host_fn_ptr = &my3argfn;
hostrpc_varfn_double_t * my_host_fn_double;
my_host_fn_double = &mydoublefn;
// Path 1: direct host calls through the function pointers
printf("Testing my3argfn execution as function pointer %p &a:%p\n",(void *) my_host_fn_ptr, &a);
uint sim1 = my_host_fn_ptr(NULL, &a, 2, 3);
double sim1d = my_host_fn_double(NULL, &a, 2, 3);
printf("Return values are %d and %f \n",sim1,sim1d);
// Path 2: host fallback of the hostrpc wrappers
printf("\nTesting the host fallback of hostrpc_varfn_double:%p\n",my_host_fn_double);
uint sim2 = hostrpc_varfn_uint(my_host_fn_ptr, &a, 4, 5);
double sim2d = hostrpc_varfn_double(my_host_fn_double, &a, 4, 5);
printf("Return values are %d and %f \n",sim2,sim2d);
// Path 3: each device iteration RPCs back to the host callbacks
printf("\nTesting call to hostrpc_varfn_uint in target region:%p\n",my_host_fn_ptr);
#pragma omp target parallel for map(from: a[0:N]) map(to: b[0:N]) map(to: my_host_fn_ptr,my_host_fn_double)
for (int j = 0; j< N; j++) {
a[j]=b[j];
uint rc=hostrpc_varfn_uint(my_host_fn_ptr, &a, j, a[j]);
double rcd=hostrpc_varfn_double(my_host_fn_double, &a, j, a[j]);
printf("DEVICE: fnptr:%p dfnptr:%p &a:%p j:%d a[j]:%d hostrpc_varfn_uint return vals are %d %f\n",
(void*) my_host_fn_ptr,
(void*) my_host_fn_double,
(void*) &a, j, a[j],rc,rcd);
}
// Verify the device loop copied b[] into a[]; rc counts mismatches
int rc = 0;
for (i=0; i<N; i++)
if (a[i] != b[i] ) {
rc++;
printf ("Wrong value: a[%d]=%d\n", i, a[i]);
}
// NOTE(review): EXIT_SUCCESS/EXIT_FAILURE need <stdlib.h>, which is not
// included directly in this file -- presumably pulled in via hostrpc.h;
// confirm.
if (!rc){
printf("Success\n");
return EXIT_SUCCESS;
} else{
printf("Failure\n");
return EXIT_FAILURE;
}
}
|
GB_unaryop__abs_int32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_int16
// op(A') function: GB_tran__abs_int32_int16
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entry-wise: Cx [p] = GB_IABS ((int32_t) Ax [p])
// for all anz entries, parallelized with a static OpenMP schedule.
GrB_Info GB_unop__abs_int32_int16
(
int32_t *restrict Cx,       // output array, type int32_t
const int16_t *restrict Ax, // input array, type int16_t
int64_t anz,                // number of entries in Ax and Cx
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* controls (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: aij = Ax [p] ; z = (int32_t) aij ; Cx [p] = GB_IABS (z)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16 -> int32, and apply
// GB_IABS.  All of the work is done by the shared transpose template
// (phase 2 of 2), specialized via the GB_* macros defined in this file.
GrB_Info GB_tran__abs_int32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* controls (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
l7_dev_setup.c | /*
* Copyright (c) 2011-2019, Triad National Security, LLC.
* All rights Reserved.
*
* CLAMR -- LA-CC-11-094
*
* Copyright 2011-2019. Triad National Security, LLC. This software was produced
* under U.S. Government contract 89233218CNA000001 for Los Alamos National
* Laboratory (LANL), which is operated by Triad National Security, LLC
* for the U.S. Department of Energy. The U.S. Government has rights to use,
* reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
* TRIAD NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified
* to produce derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Triad National Security, LLC, Los Alamos
* National Laboratory, LANL, the U.S. Government, nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE TRIAD NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TRIAD NATIONAL
* SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "l7.h"
#include "l7p.h"
#include <stdlib.h>
int l7_int_comp(const int *x, const int *y);
#define L7_LOCATION "L7_SETUP"
int L7_Dev_Setup(
const int num_base,
const int my_start_index,
const int num_indices_owned,
int *indices_needed,
const int num_indices_needed,
int *l7_id
)
{
/* Purpose
* =======
* L7_Dev_Setup is used to setup the update/scatter database as
* defined by the global indexing scheme. Each process passes
* in parameters which define the indices it owns (i.e. as
* defined by 'my_start_index' and 'num_indices_owned') and
* lists the indices it needs ('indices_needed'). From this,
* a database is defined that allows subsequent calls to
* L7_Update.
*
* Notes:
* ======
* 1) Assumes a global indexing set, linearly decomposed across
* all processes.
*
* Arguments
* =========
* num_base (input) const L7_INT
* global indexing set starts with 1 (Fortran)
* or with 0 (C)
*
* my_start_index (input) const L7_INT
* Starting index number of calling process
* in global indexing set.
*
* num_indices_owned (input) const L7_INT
* Number of indices owned by calling process.
*
* indices_needed (input) const L7_INT*
* Array containing indices needed by
* calling process.
*
* num_indices_needed (input) const L7_INT
* Number of indices of interest listed
* in array 'num_indices_needed'.
*
* l7_id (input/output) int*
* Handle to database to be setup.
*
* 0: L7 sets up a new database, and
* assigns it a value.
* > 0: L7 resets existing database with
* input information. That is, it reuses
* the allocated memory.
* < 0: An error is returned.
*
* Notes:
* =====
* 1) The handling of 0-based arrays for C and 1-based arrays for Fortran
* is handled in L7_Dev_Setup. This is done by taking the input global
* indices stored in 'indices_global_to_send' and converting them to
* 1-based and storing them in 'indices_local_to_send'.
*
* 2) The indices are handled as 4-byte integers.
*
* 3) Serial compilation creates a no-op.
*
* Program Flow
* ============
* 0) Check input for basic validity.
* 1) Set communication parameters within database.
* 2) Deternine processes this pe receives from.
* 3) Determine the number of processes this pe sends to.
* 4) Send number of as well as the indices needed from each sending process.
* 5) Set up array containing the pes this pe sends indices to.
* 6) Set up array containing the indices this pe sends to others.
*/
/*
* Local variables.
*/
int
ierr; /* Error code for return */
#ifdef HAVE_MPI
int
base_adj, /* 0 or 1 based arrays adjustment */
count_total,
i, j, /* Counters */
max_sizeof_type,
num_msgs, /* Number of sends and recvs needed */
numpes, /* Alias for l7_id_db.numpes. */
num_indices_acctd_for,
num_outstanding_requests = 0,
num_sends,
offset,
penum, /* Alias for l7_id_db.penum. */
*pi4_in, /* (int *)l7.receive_buffer */
*pi4_out, /* (int *)l7.send_buffer */
send_buffer_bytes_needed, /* Buffer space requirement. */
start_indices_needed,
this_index; /* Offset into indexing set. */
l7_id_database
*l7_id_db;
MPI_Request
*mpi_request; /* Local alias for l7_id_db->mpi_request. */
MPI_Status
*mpi_status; /* Local alias for l7_id_db->mpi_status. */
#if defined (_L7_DEBUG)
int
k; /* Counter */
#endif
/*
* Executable Statements
*/
if (! l7.mpi_initialized){
return(0);
}
if (l7.initialized != 1){
ierr = -1;
L7_ASSERT( l7.initialized == 1, "L7 not initialized", ierr);
}
/*
* Check input
*/
if (num_base){
base_adj = 1;
}
else {
base_adj = 0;
}
if (my_start_index < 0){
ierr = -1;
L7_ASSERT( my_start_index >= 0, "my_start_index < 0", ierr);
}
if (num_indices_owned < 0){
ierr = -1;
L7_ASSERT( num_indices_owned >= 0, "num_indices_owned < 0", ierr);
}
if (num_indices_needed > 0){
if (indices_needed == NULL){
ierr = -1;
L7_ASSERT( (int *)indices_needed != NULL,
"indices_needed == NULL", ierr);
}
}
if (*l7_id < 0){
ierr = *l7_id;
L7_ASSERT( *l7_id >=0,
"L7 Id must be either 0 (new id) or > 0 (existing id)",
ierr);
}
/*
* Setup database structure.
*/
if (*l7_id != 0){
/*
* Find it in the database and update based on new input.
*/
if (l7.first_db == NULL){
L7_ASSERT(l7.first_db != NULL,
"Uninitialized l7_id input, but no ids in database",
ierr);
}
l7_id_db = l7.first_db;
while (l7_id_db){
if (l7_id_db->l7_id == *l7_id)
break;
l7_id_db = l7_id_db->next_db;
}
if (l7.first_db == NULL){
ierr = -1;
L7_ASSERT( l7.first_db != NULL,
"Uninitialized l7_id input, but not found in this list",
ierr);
}
}
else{
/*
* Allocate new database, insert into linked list.
*/
if (l7.num_dbs >= L7_MAX_NUM_DBS){
ierr = -1;
L7_ASSERT(l7.num_dbs < L7_MAX_NUM_DBS,
"Too many L7 databases allocataed",
ierr);
}
l7_id_db = (l7_id_database*)calloc(1L, sizeof(l7_id_database) );
if (l7_id_db == NULL){
ierr = -1;
L7_ASSERT( l7_id_db != NULL, "Failed to allocate new database",
ierr);
}
if ( !(l7.first_db) ){
l7.first_db = l7_id_db;
l7.last_db = l7_id_db;
l7_id_db->next_db = NULL; /* Paranoia */
l7_id_db->l7_id = 1;
l7.num_dbs = 1;
}
else{
/*
* Assign a l7_id.
*/
l7_id_db->l7_id = l7.last_db->l7_id + 1;
/*
* Reset links.
*/
l7.last_db->next_db = l7_id_db;
l7.last_db = l7_id_db;
l7.num_dbs++;
}
*l7_id = l7_id_db->l7_id;
/*
* Initialize some parameters.
*/
l7_id_db->recv_counts_len = 0;
l7_id_db->recv_from_len = 0;
l7_id_db->send_to_len = 0;
l7_id_db->send_counts_len = 0;
l7_id_db->indices_to_send_len = 0;
l7_id_db->mpi_request_len = 0;
l7_id_db->mpi_status_len = 0;
}
/*
* Store input in database.
*/
l7_id_db->my_start_index = my_start_index;
l7_id_db->num_indices_owned = num_indices_owned;
if ( (l7_id_db->indices_needed_len < num_indices_needed ) &&
(num_indices_needed > 0) ){
if (l7_id_db->indices_needed)
free(l7_id_db->indices_needed);
l7_id_db->indices_needed =
(int *)calloc((unsigned long long)num_indices_needed, sizeof(int) );
if (l7_id_db->indices_needed == NULL){
ierr = -1;
L7_ASSERT( (int*)(l7_id_db->indices_needed) != NULL,
"Memory failure for indices_needed",
ierr);
}
l7_id_db->indices_needed_len = num_indices_needed;
}
#ifdef _OPENMP_SIMD
#pragma omp simd
#endif
for (i=0; i<num_indices_needed; i++){
l7_id_db->indices_needed[i] = indices_needed[i];
}
l7_id_db->num_indices_needed = num_indices_needed;
ierr = MPI_Comm_rank (MPI_COMM_WORLD, &l7_id_db->penum );
L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Comm_rank", ierr);
ierr = MPI_Comm_size (MPI_COMM_WORLD, &l7_id_db->numpes );
L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Comm_size", ierr);
l7.penum = l7_id_db->penum;
/* Local shorthand */
numpes = l7_id_db->numpes;
penum = l7_id_db->penum;
if (numpes == 1){
return(0);
}
/*
* Create array containing starting (global) index numbers
* for all processes.
*
* 1) Allgather num_indices_owned.
* 2) Scan to create starting_index.
* 3) Shift all array elements up 1 position.
* 4) Set starting_indices[0] = 0.
*
* The latter two steps allows arrays to be used as below.
*/
l7_id_db->starting_indices =
(int *)calloc((unsigned long long)(numpes+1), sizeof(int));
if(l7_id_db->starting_indices == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->starting_indices != NULL,
"No memory for l7_id_db->starting_indices", ierr);
}
ierr = MPI_Allgather( &(l7_id_db->num_indices_owned), 1, MPI_INT,
&(l7_id_db->starting_indices[1]), 1, MPI_INT,
MPI_COMM_WORLD);
L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Allgather (num_indices_owned)",
ierr);
l7_id_db->starting_indices[0] = 0;
// l7_id_db->starting_indices[0] = 1;
for (i=0; i<numpes; i++)
l7_id_db->starting_indices[i+1] += l7_id_db->starting_indices[i];
/*
* Determine the number of processes this pe receives from.
*/
l7_id_db->num_recvs = 0;
start_indices_needed = -1;
this_index = 0;
if (num_indices_needed > 0){
for (j=0; j<numpes; j++){
if ( indices_needed[this_index] >= l7_id_db->starting_indices[j]){
if (indices_needed[this_index] < l7_id_db->starting_indices[j+1]){
l7_id_db->num_recvs++;
#if defined _L7_DEBUG
printf("[pe %d] Found first one on pe %d. \n", penum, j);
#endif
/* Skip through all the rest on pe j. */
while ( ( this_index < num_indices_needed) &&
( indices_needed[this_index] < l7_id_db->starting_indices[j+1] ) )
this_index++;
/* Remember where we found the first one. */
if ( start_indices_needed == -1)
start_indices_needed = j;
if (this_index == num_indices_needed)
break;
}
}
}
if (l7_id_db->num_recvs == 0){
ierr = -1;
L7_ASSERT(l7_id_db->num_recvs != 0, "No indices found", ierr);
}
}
if (this_index != num_indices_needed){
printf("[pe %d] ERROR -- can't find all the indices I need. I have %d, need %d\n",
penum, this_index, num_indices_needed);
}
#if defined _L7_DEBUG
printf("[pe %d] l7_id_dp->num_recvs = %d\n",
penum, l7_id_db->num_recvs);
#endif
/*
* Allocate space for counts for each pe sending to this one.
*/
if (l7_id_db->num_recvs > l7_id_db->recv_counts_len){
if (l7_id_db->recv_counts)
free(l7_id_db->recv_counts);
l7_id_db->recv_counts =
(int *)calloc((unsigned long long)l7_id_db->num_recvs, sizeof(int) );
if (l7_id_db->recv_counts == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->recv_counts != NULL,
"No space for l7_id_db->recv_counts", ierr);
}
l7_id_db->recv_counts_len = l7_id_db->num_recvs;
int num_recvs = l7_id_db->num_recvs; // for vectorization
for (i=0; i<num_recvs; i++)
l7_id_db->recv_counts[i] = 0; /* calloc does not guarantee = 0. */
}
if (l7_id_db->num_recvs > l7_id_db->recv_from_len){
if (l7_id_db->recv_from)
free(l7_id_db->recv_from);
l7_id_db->recv_from =
(int *)calloc((unsigned long long)l7_id_db->num_recvs, sizeof(int) );
if (l7_id_db->recv_from == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->recv_from != NULL,
"No space for l7_id_db->recv_from", ierr);
}
l7_id_db->recv_from_len = l7_id_db->num_recvs;
int num_recvs = l7_id_db->num_recvs; // for vectorization
for (i=0; i<num_recvs; i++)
l7_id_db->recv_from[i] = -999;
}
/*
* Determine process and the number of indices this pe recvs from it.
*/
if (num_indices_needed > 0){
this_index = 0;
num_indices_acctd_for = 0;
i=0;
for (j=start_indices_needed; j<numpes; j++){
if (indices_needed[this_index] >= l7_id_db->starting_indices[j] ){
if (indices_needed[this_index] < l7_id_db->starting_indices[j+1]){
/* Found the first one on pe j. */
l7_id_db->recv_from[i] = j;
l7_id_db->recv_counts[i] = 1;
num_indices_acctd_for++;
if (num_indices_acctd_for == num_indices_needed)
break;
this_index++;
while ( ( num_indices_acctd_for < num_indices_needed ) &&
( indices_needed[this_index] < l7_id_db->starting_indices[j+1] ) ) {
/* Find the rest on pe j. */
l7_id_db->recv_counts[i]++;
this_index++;
num_indices_acctd_for++;
}
if (num_indices_acctd_for == num_indices_needed)
break;
i++;
}
}
}
if (num_indices_needed != num_indices_acctd_for){
ierr = -1;
L7_ASSERT(num_indices_needed == num_indices_acctd_for,
"Failed to find all the needed indices", ierr);
}
}
/*
* Determine number of processes for which this pe owns indices
* those pes need. This is done use a reduction (MPI_Allreduce).
*/
if (l7.sizeof_send_buffer < 2 * numpes * (int)sizeof(int)){
if (l7.send_buffer)
free(l7.send_buffer);
l7.send_buffer = calloc ((unsigned long long)(2*numpes), sizeof(int));
if (l7.send_buffer == NULL){
ierr = -1;
L7_ASSERT(l7.send_buffer != NULL, "No memory for send buffer", ierr);
}
l7.sizeof_send_buffer = 2 * numpes * (int)sizeof(int);
}
pi4_in = (int*)l7.send_buffer;
pi4_out = &pi4_in[numpes];
for (i=0; i<numpes; i++)
pi4_in[i] = 0;
for (i=0; i<l7_id_db->num_recvs; i++)
pi4_in[l7_id_db->recv_from[i]] = 1;
ierr = MPI_Allreduce(pi4_in, pi4_out, numpes, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Allreduce ( l7_id_db->recv_from )", ierr);
l7_id_db->num_sends = pi4_out[penum];
#if defined _L7_DEBUG
printf("[pe %d] l7_id_db->num_sends = %d \n", penum, l7_id_db->num_sends);
#endif
/*
* Allocate request and status arrays.
*/
num_msgs = ( 2 * l7_id_db->num_recvs ) + l7_id_db->num_sends;
/* Ensure enough outstanding messages for L7_Update_pack model. */
if (num_msgs < (L7_MIN_MPI_REQS * l7_id_db->num_recvs ) )
num_msgs = L7_MIN_MPI_REQS * l7_id_db->num_recvs;
if (num_msgs > l7_id_db->mpi_request_len) {
if (l7_id_db->mpi_request)
free(l7_id_db->mpi_request);
l7_id_db->mpi_request = (MPI_Request *) calloc ((unsigned long long)num_msgs, sizeof(MPI_Request));
if (l7_id_db->mpi_request == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->mpi_request != NULL,
"Allocation of l7_id_db->mpi_request failed", ierr);
}
l7_id_db->mpi_request_len = num_msgs;
}
if (num_msgs > l7_id_db->mpi_status_len){
if (l7_id_db->mpi_status)
free(l7_id_db->mpi_status);
l7_id_db->mpi_status = (MPI_Status *) calloc((unsigned long long)num_msgs, sizeof(MPI_Status) );
if (l7_id_db->mpi_status == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->mpi_status != NULL,
"Allocation of l7_id_db->mpi_status failed", ierr);
}
l7_id_db->mpi_status_len = num_msgs;
}
/* Local shorthand */
mpi_request = l7_id_db->mpi_request;
mpi_status = l7_id_db->mpi_status;
/*
* Send number of indices needed from each sending process.
*/
num_outstanding_requests = 0;
for (i=0; i<l7_id_db->num_recvs; i++){
#if defined _L7_DEBUG
printf("[pe %d] recv_counts[%d] = %d to pe %d \n", penum, i,
l7_id_db->recv_counts[i], l7_id_db->recv_from[i] );
#endif
ierr = MPI_Isend(&l7_id_db->recv_counts[i], 1, MPI_INT,
l7_id_db->recv_from[i], L7_SETUP_SEND_COUNT_TAG,
MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Isend (recv_counts[i] )",
ierr);
}
/*
* Receive counts for the processes to which this pe sends.
* This pe doesn't know who needs what it has, so we must
* use wildcard receives.
*/
if (l7_id_db->num_sends > l7_id_db->send_counts_len){
if (l7_id_db->send_counts)
free(l7_id_db->send_counts);
l7_id_db->send_counts = (int *) calloc((unsigned long long)l7_id_db->num_sends, sizeof(int) );
if (l7_id_db->send_counts == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->send_counts != NULL,
"Failed to allocate l7_id_db->send_counts", ierr);
}
l7_id_db->send_counts_len = l7_id_db->num_sends;
}
if (l7_id_db->num_sends > l7_id_db->send_to_len){
if (l7_id_db->send_to)
free(l7_id_db->send_to);
l7_id_db->send_to = (int *) calloc((unsigned long long)l7_id_db->num_sends, sizeof(int) );
if (l7_id_db->send_to == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->send_to != NULL,
"Failed to allocate l7_id_db->send_to", ierr);
}
l7_id_db->send_to_len = l7_id_db->num_sends;
}
for (i=0; i<l7_id_db->num_sends; i++){
ierr = MPI_Irecv(&l7_id_db->send_counts[i], 1, MPI_INT,
MPI_ANY_SOURCE, L7_SETUP_SEND_COUNT_TAG, MPI_COMM_WORLD,
&mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Irecv ( indices_needed[i] )", ierr);
}
if (num_outstanding_requests > 0){
ierr = MPI_Waitall(num_outstanding_requests, mpi_request, mpi_status);
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Waitall ( counts )", ierr);
}
num_outstanding_requests = 0;
/*
* Determine which processes sent the above messages.
* These are the 'send_to' processes.
*/
offset = l7_id_db->num_recvs;
for (i=0; i<l7_id_db->num_sends; i++){
l7_id_db->send_to[i] = mpi_status[offset+i].MPI_SOURCE;
}
/*
* Allocate space for 'indices_global_to_send' and
* 'indices_local_to_send'.
*/
count_total = 0;
for (i=0; i<l7_id_db->num_sends; i++){
count_total += l7_id_db->send_counts[i];
}
if (count_total > l7_id_db->indices_to_send_len){
if (l7_id_db->indices_global_to_send)
free(l7_id_db->indices_global_to_send);
l7_id_db->indices_global_to_send = (int *) calloc((unsigned long long)count_total, sizeof(int) );
if (l7_id_db->indices_global_to_send == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->indices_global_to_send != NULL,
"No memory for l7_id_db->indices_global_to_send.", ierr);
}
if (l7_id_db->indices_local_to_send)
free(l7_id_db->indices_local_to_send);
l7_id_db->indices_local_to_send = (int *) calloc((unsigned long long)count_total, sizeof(int) );
if (l7_id_db->indices_local_to_send == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->indices_local_to_send != NULL,
"No memory for l7_id_db->indices_local_to_send.", ierr);
}
l7_id_db->indices_to_send_len = count_total;
}
/*
* Send (global) indices needed from each sending process.
*/
offset = 0;
for (i=0; i<l7_id_db->num_recvs; i++){
#if defined _L7_DEBUG
printf("[pe %d] Sending %d indices to pe %d. \n",
penum, l7_id_db->recv_counts[i], l7_id_db->recv_from[i] );
for (k=offset; k<offset+l7_id_db->recv_counts[i]; k++){
printf(" index[%d] = %d \n", k, l7_id_db->indices_needed[k] );
}
#endif
ierr = MPI_Isend(&l7_id_db->indices_needed[offset],
l7_id_db->recv_counts[i], MPI_INT,
l7_id_db->recv_from[i], L7_SETUP_INDICES_NEEDED_TAG,
MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Isend ( indices_needed[i] )", ierr);
offset+=l7_id_db->recv_counts[i];
}
/*
* Receive (global) indices needed by the pes to which this pe sends.
* Note that these receives are from expected sources.
*/
offset = 0;
for (i=0; i<l7_id_db->num_sends; i++){
ierr = MPI_Irecv(&l7_id_db->indices_global_to_send[offset],
l7_id_db->send_counts[i], MPI_INT,
l7_id_db->send_to[i], L7_SETUP_INDICES_NEEDED_TAG,
MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Irecv ( indices_global_to_send )", ierr);
offset += l7_id_db->send_counts[i];
}
/*
* Complete indices communication.
*/
if (num_outstanding_requests > 0){
ierr = MPI_Waitall(num_outstanding_requests, mpi_request, mpi_status );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Waitall ( indices )", ierr);
}
#if defined _L7_DEBUG
ierr = MPI_Barrier(MPI_COMM_WORLD);
offset = 0;
for (j=0; j<numpes; j++){
if (penum == j){
for (i=0; i<l7_id_db->num_sends; i++){
printf("[pe %d] Recvd %d indices from pe %d. \n", penum,
l7_id_db->send_counts[i], l7_id_db->send_to[i] );
for (k=offset; k<offset+l7_id_db->send_counts[i]; k++){
printf(" index[%d] = %d \n",k l7_id_db->indices_global_to_send[k] );
}
offset += l7_id_db->send_counts[i];
}
}
sleep(1);
}
#endif
/* Create array of local indices corresponding to
* array of global indices requested. Note the
* conversion from 1-based indices to 0-based is
* accomplished here. (See note in header).
*/
offset = 0;
for (i=0; i<l7_id_db->num_sends; i++){
int send_counts = l7_id_db->send_counts[i]; // for vectorization
int adj = (int)(my_start_index) - base_adj; // for vectorization
#ifdef _OPENMP_SIMD
#pragma omp simd linear(offset:1)
#endif
for (j=0; j<send_counts; j++){
l7_id_db->indices_local_to_send[offset] =
l7_id_db->indices_global_to_send[offset] - adj;
offset ++;
}
}
#if defined _L7_DEBUG
ierr = MPI_Barrier(MPI_COMM_WORLD);
for (i=0; i<numpes; i++){
if (penum == i){
for (j=0; j<l7_id_db->num_sends; j++){
printf("[pe %d] send %d indices to pe %d \n", penum,
l7_id_db->send_counts[j], l7_id_db->send_to[] );
ierr = MPI_Barrier(MPI_COMM_WORLD);
}
}
}
flush(stdout);
ierr = MPI_Barrier(MPI_COMM_WORLD);
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Barrier failure", ierr);
for (i=0; i<numpes; i++){
if (penum == i){
printf("----------------------------------------------------\n")
for (j=0; j<l7_id_db->num_sends; j++){
printf("[pe %d] Send (index %d) to pe %d. \n",penum,
l7_id_db->indices_global_to_send[j], l7_id_db->send_to[j] );
}
for (j=0; j<l7_id_db->num_recvs; j++){
printf("[pe %d] Recving (index %d) from pe %d. \n",penum,
l7_id_db->indices_needed[j], l7_id_db->recv_from[j] );
}
printf("----------------------------------------------------\n")
fflush(stdout);
}
sleep(2);
}
#endif /* _L7_DEBUG */
/*
* Ensure buffer available for data to be sent.
*/
send_buffer_bytes_needed = 0;
num_sends = l7_id_db->num_sends;
max_sizeof_type = sizeof(double);
for (i=0; i<num_sends; i++)
send_buffer_bytes_needed += l7_id_db->send_counts[i] * max_sizeof_type;
if (send_buffer_bytes_needed > l7.sizeof_send_buffer ){
if (l7.send_buffer)
free(l7.send_buffer);
l7.send_buffer = (char *)calloc((unsigned long long)send_buffer_bytes_needed, sizeof (char) );
if (l7.send_buffer == NULL){
ierr = -1;
L7_ASSERT(l7.send_buffer != NULL, "No memory for send buffer", ierr);
}
l7.sizeof_send_buffer = send_buffer_bytes_needed;
}
#ifdef HAVE_OPENCL
l7_id_db->num_indices_have = 0;
for (int i=0; i<num_sends; i++){
/* Load data to be sent. */
l7_id_db->num_indices_have += l7_id_db->send_counts[i];
}
size_t num_indices_have = l7_id_db->num_indices_have;
l7_id_db->indices_have = (int *) malloc(num_indices_have*sizeof(int));
int ioffset = 0;
for (int i=0; i<num_sends; i++){
/* Load data to be sent. */
int send_count = l7_id_db->send_counts[i];
#ifdef _OPENMP_SIMD
#pragma omp simd linear(ioffset:1)
#endif
for (int j=0; j<send_count; j++){
l7_id_db->indices_have[ioffset] = l7_id_db->indices_local_to_send[ioffset];
ioffset++;
}
}
// For optimization of cache --
//qsort(l7_id_db->indices_have, num_indices_have, sizeof(int), (__compar_fn_t)l7_int_comp);
if (l7.numpes > 1) {
l7_id_db->dev_indices_have = ezcl_malloc(NULL, "dev_indices_have", &num_indices_have, sizeof(cl_int), CL_MEM_READ_WRITE, 0);
cl_command_queue command_queue = ezcl_get_command_queue();
ezcl_enqueue_write_buffer(command_queue, l7_id_db->dev_indices_have, CL_TRUE, 0, num_indices_have*sizeof(cl_int), &l7_id_db->indices_have[0], NULL);
}
#endif
/*
* Message tag management
*/
l7_id_db->this_tag_update = L7_UPDATE_TAGS_MIN;
/*
* Database is setup for this l7_id -- return.
*/
#endif /* HAVE_MPI */
ierr = L7_OK;
return(ierr);
} /* End L7_Dev_Setup */
/*
 * qsort-style comparator for ints: returns <0, 0, >0 when *x is less
 * than, equal to, or greater than *y.
 *
 * Fix: the previous `return *x - *y;` invokes signed-integer-overflow
 * undefined behavior when the operands are far apart (e.g. INT_MAX vs a
 * negative value).  The sign-comparison form below yields the same
 * comparator ordering without any arithmetic that can overflow.
 */
int l7_int_comp(const int *x, const int *y)
{
   return (*x > *y) - (*x < *y);
}
/*
 * Fortran-callable shim for L7_Dev_Setup: Fortran passes every argument
 * by reference, so scalar arguments are dereferenced before forwarding.
 * The leading literal 0 is the same first argument the C entry point has
 * always received here.
 */
void L7_DEV_SETUP(
      const int   *my_start_index,
      const int   *num_indices_owned,
      int         *indices_needed,
      const int   *num_indices_needed,
      int         *l7_id
      )
{
   L7_Dev_Setup(0, *my_start_index, *num_indices_owned,
                indices_needed, *num_indices_needed, l7_id);
}
|
static_elasticity2D.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <math.h>
#include "nb/memory_bot.h"
#include "nb/math_bot.h"
#include "nb/solver_bot.h"
#include "nb/container_bot.h"
#include "nb/graph_bot.h"
#include "nb/geometric_bot.h"
#include "nb/pde_bot/material.h"
#include "nb/pde_bot/common_solid_mechanics/analysis2D.h"
#include "nb/pde_bot/common_solid_mechanics/formulas.h"
#include "nb/pde_bot/boundary_conditions/bcond.h"
#include "nb/pde_bot/boundary_conditions/bcond_iter.h"
#include "nb/pde_bot/finite_element/element.h"
#include "nb/pde_bot/finite_element/gaussp_to_nodes.h"
#include "nb/pde_bot/finite_element/solid_mechanics/static_elasticity2D.h"
#include "set_bconditions.h"
#include "pipeline.h"
#define POW2(a) ((a)*(a))
static int solver(const nb_sparse_t *const A,
const double *const b, double* x);
/*
 * Solve a 2D linear-elasticity problem with the finite element method.
 *
 * Pipeline: build the node-adjacency graph, allocate the global
 * stiffness matrix K from it, assemble K and the load vector F, impose
 * boundary conditions, solve K u = F for the nodal displacements, and
 * finally recover the strain field at the element gauss points.
 *
 * Returns 0 on success, 1 if assembly fails, 2 if the linear solver
 * fails.  `displacement` (2 * N_nodes entries) and `strain` are outputs.
 * `elements_enabled` may be NULL to enable all elements.
 */
int nb_fem_compute_2D_Solid_Mechanics
			(const nb_mesh2D_t *const part,
			 const nb_fem_elem_t *const elemtype,
			 const nb_material_t *const material,
			 const nb_bcond_t *const bcond,
			 bool enable_self_weight,
			 double gravity[2],
			 nb_analysis2D_t analysis2D,
			 nb_analysis2D_params *params2D,
			 const bool *elements_enabled, /* NULL to enable all */
			 double *displacement, /* Output */
			 double *strain       /* Output */)
{
	int status = 0;
	/* The graph is only needed to shape the sparse matrix; it is
	 * released as soon as K is created. */
	nb_graph_t *graph = nb_allocate_mem(nb_graph_get_memsize());
	nb_graph_init(graph);
	nb_mesh2D_load_graph(part, graph, NB_NODES_LINKED_BY_ELEMS);
	nb_sparse_t *K = nb_sparse_create(graph, NULL, 2); /* 2 DOF per node */
	nb_graph_finish(graph);
	nb_free_mem(graph);
	/* Load vector: 2 DOF (x, y) per node, zero-initialized. */
	uint32_t N_nod = nb_mesh2D_get_N_nodes(part);
	uint32_t F_memsize = 2 * N_nod * sizeof(double);
	double* F = nb_soft_allocate_mem(F_memsize);
	memset(F, 0, F_memsize);
	int status_assemble =
		pipeline_assemble_system(K, NULL, F, part, elemtype, material,
					 enable_self_weight, gravity,
					 analysis2D, params2D,
					 elements_enabled);
	if (0 != status_assemble) {
		status = 1;
		goto CLEANUP_LINEAR_SYSTEM;
	}
	/* Factor 1.0: boundary conditions applied unscaled. */
	nb_fem_set_bconditions(part, K, F, bcond, 1.0);
	int solver_status = solver(K, F, displacement);
	if (0 != solver_status) {
		status = 2;
		goto CLEANUP_LINEAR_SYSTEM;
	}
	pipeline_compute_strain(strain, part, displacement, elemtype);
/* Single cleanup point: K and F are released on every exit path. */
CLEANUP_LINEAR_SYSTEM:
	nb_sparse_destroy(K);
	nb_soft_free_mem(F_memsize, F);
	return status;
}
/*
 * Solve A x = b with Jacobi-preconditioned conjugate gradient, starting
 * from a zero initial guess.  Returns 0 when the CG routine reports
 * status 0 or 1 (both treated as success here), 1 otherwise (tolerance
 * not reached).
 */
static int solver(const nb_sparse_t *const A,
		  const double *const b, double* x)
{
	uint32_t N = nb_sparse_get_size(A);
	memset(x, 0, N * sizeof(*x));
	int cg_status = nb_sparse_solve_CG_precond_Jacobi(A, b, x, N,
							  1e-8, NULL,
							  NULL, 1);
	/* Anything other than the first two status codes means the
	 * tolerance was not reached in CG Jacobi. */
	return (0 == cg_status || 1 == cg_status) ? 0 : 1;
}
/*
 * Convert gauss-point strains to stresses via the 2D constitutive
 * matrix D (stored as 4 entries: D11, D12, D22, D33).  Elements not
 * enabled keep a near-zero D, so their stress is effectively nulled.
 * `strain` and `stress` hold 3 components (xx, yy, xy) per gauss point.
 */
void nb_fem_compute_stress_from_strain
			(uint32_t N_elements,
			 const nb_fem_elem_t *const elem,
			 const nb_material_t *const material,
			 nb_analysis2D_t analysis2D,
			 double* strain,
			 const bool* elements_enabled /* NULL to enable all */,
			 double* stress /* Output */)
{
	uint32_t omp_parallel_threads = 1;

#pragma omp parallel for num_threads(omp_parallel_threads) schedule(guided)
	for (uint32_t elem_id = 0; elem_id < N_elements; elem_id++) {
		/* Disabled elements fall back to a tiny stiffness. */
		double D[4] = {1e-6, 1e-6, 1e-6, 1e-6};
		if (pipeline_elem_is_enabled(elements_enabled, elem_id))
			nb_pde_get_constitutive_matrix(D, material,
						       analysis2D);
		uint8_t N_gp = nb_fem_elem_get_N_gpoints(elem);
		for (int gp = 0; gp < N_gp; gp++) {
			uint32_t id = elem_id * N_gp + gp;
			const double *e = &strain[id * 3];
			double *s = &stress[id * 3];
			s[0] = e[0] * D[0] + e[1] * D[1];
			s[1] = e[0] * D[1] + e[1] * D[2];
			s[2] = e[2] * D[3];
		}
	}
}
|
utils.c | // Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved.
// Copyright 2015. UChicago Argonne, LLC. This software was produced
// under U.S. Government contract DE-AC02-06CH11357 for Argonne National
// Laboratory (ANL), which is operated by UChicago Argonne, LLC for the
// U.S. Department of Energy. The U.S. Government has rights to use,
// reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
// UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
// ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is
// modified to produce derivative works, such modified software should
// be clearly marked, so as not to confuse it with the version available
// from ANL.
// Additionally, redistribution and use in source and binary forms, with
// or without modification, are permitted provided that the following
// conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of UChicago Argonne, LLC, Argonne National
// Laboratory, ANL, the U.S. Government, nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago
// Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "utils.h"
#include <float.h>
#include <stdint.h>
// for windows build
#ifdef WIN32
# ifdef PY3K
void
PyInit_libtomopy(void)
{
}
# else
void
initlibtomopy(void)
{
}
# endif
#endif
//======================================================================================//
void
preprocessing(int ry, int rz, int num_pixels, float center, float* mov, float* gridx,
float* gridy)
{
for(int i = 0; i <= ry; ++i)
{
gridx[i] = -ry * 0.5f + i;
}
for(int i = 0; i <= rz; ++i)
{
gridy[i] = -rz * 0.5f + i;
}
*mov = ((float) num_pixels - 1) * 0.5f - center;
if(*mov - floor(*mov) < 0.01f)
{
*mov += 0.01f;
}
*mov += 0.5;
}
//======================================================================================//
// Returns 1 when theta_p lies in quadrant 1 or 3 (i.e. [0, pi/2) or
// [pi, 3*pi/2) after normalization into [0, 2*pi)), else 0.
int
calc_quadrant(float theta_p)
{
    // here we cast the float to an integer and rescale the integer to
    // near INT_MAX to retain the precision. This method was tested
    // on 1M random random floating points between -2*pi and 2*pi and
    // was found to produce a speed up of:
    //
    //  - 14.5x (Intel i7 MacBook)
    //  - 2.2x  (NERSC KNL)
    //  - 1.5x  (NERSC Edison)
    //  - 1.7x  (NERSC Haswell)
    //
    // with a 0.0% incorrect quadrant determination rate
    //
    // NOTE(review): ipi_c appears chosen so that 2*pi*ipi_c still fits
    // in int32 (~2.14e9) -- confirm before changing the constant.
    const int32_t ipi_c   = 340870420;
    int32_t      theta_i = (int32_t)(theta_p * ipi_c);
    // Negative angles are wrapped into [0, 2*pi) in the scaled-integer
    // domain; the double result of the ternary is implicitly converted
    // back to int32 on addition.
    theta_i += (theta_i < 0) ? (2.0f * M_PI * ipi_c) : 0;
    // Compare against scaled quadrant boundaries (int promoted to
    // float/double for the comparison).
    return ((theta_i >= 0 && theta_i < 0.5f * M_PI * ipi_c) ||
            (theta_i >= 1.0f * M_PI * ipi_c && theta_i < 1.5f * M_PI * ipi_c))
               ? 1
               : 0;
}
//======================================================================================//
/*
 * Intersect the ray defined by point (xi, yi) rotated by angle
 * (sin_p, cos_p) with the horizontal and vertical grid lines.
 * coordx[n] is the x coordinate where the ray crosses gridy[n];
 * coordy[n] is the y coordinate where it crosses gridx[n].
 */
void
calc_coords(int ry, int rz, float xi, float yi, float sin_p, float cos_p,
            const float* gridx, const float* gridy, float* coordx, float* coordy)
{
    /* Rotate the source and detector endpoints of the ray. */
    float srcx = xi * cos_p - yi * sin_p;
    float srcy = xi * sin_p + yi * cos_p;
    float detx = -xi * cos_p - yi * sin_p;
    float dety = -xi * sin_p + yi * cos_p;

    float slope  = (srcy - dety) / (srcx - detx);
    float islope = (srcx - detx) / (srcy - dety);

#pragma omp simd
    for(int idx = 0; idx <= rz; ++idx)
        coordx[idx] = srcx + islope * (gridy[idx] - srcy);
#pragma omp simd
    for(int idx = 0; idx <= ry; ++idx)
        coordy[idx] = srcy + slope * (gridx[idx] - srcx);
}
//======================================================================================//
/*
 * Filter the ray/grid intersection points, keeping only those that fall
 * inside the grid with a 0.01 guard band against the boundary.  The 'a'
 * list pairs kept coordx values with their gridy line; the 'b' list
 * pairs kept coordy values with their gridx line.
 */
void
trim_coords(int ry, int rz, const float* coordx, const float* coordy, const float* gridx,
            const float* gridy, int* asize, float* ax, float* ay, int* bsize, float* bx,
            float* by)
{
    int na = 0;
    float xlo = gridx[0] + 0.01f;
    float xhi = gridx[ry] - 0.01f;
    for(int n = 0; n <= rz; ++n)
    {
        float cx = coordx[n];
        if(cx >= xlo && cx <= xhi)
        {
            ax[na] = cx;
            ay[na] = gridy[n];
            ++na;
        }
    }
    *asize = na;

    int nb = 0;
    float ylo = gridy[0] + 0.01f;
    float yhi = gridy[rz] - 0.01f;
    for(int n = 0; n <= ry; ++n)
    {
        float cy = coordy[n];
        if(cy >= ylo && cy <= yhi)
        {
            bx[nb] = gridx[n];
            by[nb] = cy;
            ++nb;
        }
    }
    *bsize = nb;
}
//======================================================================================//
/*
 * Merge the two pre-sorted intersection lists into one ascending list
 * of (coorx, coory) pairs; *csize receives asize + bsize.
 *
 * When ind_condition == 0 the 'a' list is stored in descending order
 * and is consumed back-to-front; otherwise front-to-back.
 *
 * Improvement: the original duplicated the entire merge loop for the
 * two traversal directions.  Folding the direction into a single index
 * computation halves the code with identical behavior.
 */
void
sort_intersections(int ind_condition, int asize, const float* ax, const float* ay,
                   int bsize, const float* bx, const float* by, int* csize, float* coorx,
                   float* coory)
{
    int i = 0, j = 0, k = 0;
    while(i < asize && j < bsize)
    {
        /* Index of the next unconsumed 'a' element for this direction. */
        int ai = (ind_condition == 0) ? (asize - 1 - i) : i;
        if(ax[ai] < bx[j])
        {
            coorx[k] = ax[ai];
            coory[k] = ay[ai];
            ++i;
        }
        else
        {
            coorx[k] = bx[j];
            coory[k] = by[j];
            ++j;
        }
        ++k;
    }
    /* Drain whichever list still has elements. */
    while(i < asize)
    {
        int ai = (ind_condition == 0) ? (asize - 1 - i) : i;
        coorx[k] = ax[ai];
        coory[k] = ay[ai];
        ++i;
        ++k;
    }
    while(j < bsize)
    {
        coorx[k] = bx[j];
        coory[k] = by[j];
        ++j;
        ++k;
    }
    (*csize) = asize + bsize;
}
//======================================================================================//
void
calc_dist(int ry, int rz, int csize, const float* coorx, const float* coory, int* indi,
float* dist)
{
if(csize < 2)
return;
const int _size = csize - 1;
//------------------------------------------------------------------------//
// calculate dist
//------------------------------------------------------------------------//
{
float* _diffx = malloc(_size * sizeof(float));
float* _diffy = malloc(_size * sizeof(float));
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
_diffx[n] = (coorx[n + 1] - coorx[n]) * (coorx[n + 1] - coorx[n]);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
_diffy[n] = (coory[n + 1] - coory[n]) * (coory[n + 1] - coory[n]);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
dist[n] = sqrtf(_diffx[n] + _diffy[n]);
}
free(_diffx);
free(_diffy);
}
//------------------------------------------------------------------------//
// calculate indi
//------------------------------------------------------------------------//
int* _indx = malloc(_size * sizeof(int));
int* _indy = malloc(_size * sizeof(int));
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
float _midx = 0.5f * (coorx[n + 1] + coorx[n]);
float _x1 = _midx + 0.5f * ry;
float _i1 = (int) (_midx + 0.5f * ry);
_indx[n] = _i1 - (_i1 > _x1);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
float _midy = 0.5f * (coory[n + 1] + coory[n]);
float _x2 = _midy + 0.5f * rz;
float _i2 = (int) (_midy + 0.5f * rz);
_indy[n] = _i2 - (_i2 > _x2);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
indi[n] = _indy[n] + (_indx[n] * rz);
}
free(_indx);
free(_indy);
}
//======================================================================================//
void
calc_dist2(int ry, int rz, int csize, const float* coorx, const float* coory, int* indx,
int* indy, float* dist)
{
#pragma omp simd
for(int n = 0; n < csize - 1; ++n)
{
float diffx = coorx[n + 1] - coorx[n];
float diffy = coory[n + 1] - coory[n];
dist[n] = sqrt(diffx * diffx + diffy * diffy);
}
#pragma omp simd
for(int n = 0; n < csize - 1; ++n)
{
float midx = (coorx[n + 1] + coorx[n]) * 0.5f;
float midy = (coory[n + 1] + coory[n]) * 0.5f;
float x1 = midx + ry * 0.5f;
float x2 = midy + rz * 0.5f;
int i1 = (int) (midx + ry * 0.5f);
int i2 = (int) (midy + rz * 0.5f);
indx[n] = i1 - (i1 > x1);
indy[n] = i2 - (i2 > x2);
}
}
//======================================================================================//
/*
 * Forward-project one ray: accumulate model values (slice s) weighted
 * by the per-segment intersection lengths into the ray's detector bin
 * simdata[d + p*dx + s*dt*dx].
 */
void
calc_simdata(int s, int p, int d, int ry, int rz, int dt, int dx, int csize,
             const int* indi, const float* dist, const float* model, float* simdata)
{
    const int    ray  = d + p * dx + s * dt * dx;
    const float* slab = model + s * ry * rz;
    for(int n = 0; n < csize - 1; ++n)
    {
        simdata[ray] += slab[indi[n]] * dist[n];
    }
}
//======================================================================================//
/*
 * Vector-field forward projection of one ray: accumulates the
 * (vx, vy)-weighted combination of the two model components, scaled by
 * each segment length, into simdata[d + p*dx + s*dt*dx].
 *
 * Improvement: the detector index and the model offset were recomputed
 * on every loop iteration in the original; they are now hoisted, which
 * matches the style of calc_simdata and leaves results identical
 * (integer arithmetic only).
 */
void
calc_simdata2(int s, int p, int d, int ry, int rz, int dt, int dx, int csize,
              const int* indx, const int* indy, const float* dist, float vx, float vy,
              const float* modelx, const float* modely, float* simdata)
{
    const int index_data  = d + p * dx + s * dt * dx;
    const int index_model = s * ry * rz;
    for(int n = 0; n < csize - 1; n++)
    {
        const int m = indy[n] + indx[n] * rz + index_model;
        simdata[index_data] += (modelx[m] * vx + modely[m] * vy) * dist[n];
    }
}
//======================================================================================//
/*
 * Axis-aware vector forward projection of one ray.  Depending on
 * `axis`, a different pair of model components and a different memory
 * layout is used; the (vx, vy)-weighted sum is scaled by each segment
 * length and accumulated into simdata[d + p*dx + s*dt*dx].  An
 * unrecognized axis is a no-op (as in the original).
 *
 * Improvement: the loop-invariant detector index and the per-iteration
 * model offset are computed once each, consistent with calc_simdata;
 * integer arithmetic only, so results are identical.
 */
void
calc_simdata3(int s, int p, int d, int ry, int rz, int dt, int dx, int csize,
              const int* indx, const int* indy, const float* dist, float vx, float vy,
              const float* modelx, const float* modely, const float* modelz, int axis,
              float* simdata)
{
    const int index_data = d + p * dx + s * dt * dx;
    if(axis == 0)
    {
        for(int n = 0; n < csize - 1; n++)
        {
            const int m = indy[n] + indx[n] * rz + s * ry * rz;
            simdata[index_data] += (modelx[m] * vx + modely[m] * vy) * dist[n];
        }
    }
    else if(axis == 1)
    {
        for(int n = 0; n < csize - 1; n++)
        {
            const int m = s + indx[n] * rz + indy[n] * ry * rz;
            simdata[index_data] += (modely[m] * vx + modelz[m] * vy) * dist[n];
        }
    }
    else if(axis == 2)
    {
        for(int n = 0; n < csize - 1; n++)
        {
            const int m = indx[n] + s * rz + indy[n] * ry * rz;
            simdata[index_data] += (modelx[m] * vx + modelz[m] * vy) * dist[n];
        }
    }
}
//======================================================================================//
|
kmp_detach_tasks_t3.c | // RUN: %libomp-compile && env OMP_NUM_THREADS='3' %libomp-run
// RUN: %libomp-compile && env OMP_NUM_THREADS='1' %libomp-run
// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
// detached untied
#define PTASK_FLAG_DETACHABLE 0x40
// OpenMP RTL interfaces
typedef unsigned long long kmp_uint64;
typedef long long kmp_int64;
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// Compiler-generated code (emulation)
typedef struct ident {
void* dummy; // not used in the library
} ident_t;
typedef enum kmp_event_type_t {
KMP_EVENT_UNINITIALIZED = 0,
KMP_EVENT_ALLOW_COMPLETION = 1
} kmp_event_type_t;
typedef struct {
kmp_event_type_t type;
union {
void *task;
} ed;
} kmp_event_t;
typedef struct shar { // shareds used in the task
} *pshareds;
typedef struct task {
pshareds shareds;
int(*routine)(int,struct task*);
int part_id;
// void *destructor_thunk; // optional, needs flag setting if provided
// int priority; // optional, needs flag setting if provided
// ------------------------------
// privates used in the task:
omp_event_handle_t evt;
} *ptask, kmp_task_t;
typedef struct DEP {
size_t addr;
size_t len;
int flags;
} dep;
typedef int(* task_entry_t)( int, ptask );
#ifdef __cplusplus
extern "C" {
#endif
extern int __kmpc_global_thread_num(void *id_ref);
extern int** __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, task_entry_t rtn);
extern int __kmpc_omp_task_with_deps(id *loc, int gtid, ptask task, int nd,
dep *dep_lst, int nd_noalias, dep *noalias_dep_lst);
extern int __kmpc_omp_task(id *loc, int gtid, kmp_task_t *task);
extern omp_event_handle_t __kmpc_task_allow_completion_event(
ident_t *loc_ref, int gtid, kmp_task_t *task);
#ifdef __cplusplus
}
#endif
int volatile checker;
// User's code, outlined into task entry
// Outlined task body: sets the global flag so main() can verify that
// the detached task actually ran.  gtid/task are required by the RTL
// task-entry signature but unused here.
int task_entry(int gtid, ptask task) {
  (void)gtid;
  (void)task;
  checker = 1;
  return 0;
}
// Test driver: emulates compiler-generated code for a *detached* task
// that also carries a dependence, and checks it executes exactly once.
// A long-running sibling task holds the inout dependence on `nt` first,
// so the detachable task must wait for it before running.
int main() {
  int i, j, gtid = __kmpc_global_thread_num(NULL);
  int nt = omp_get_max_threads();
  ptask task;
  pshareds psh;
  checker = 0;
  omp_set_dynamic(0);
#pragma omp parallel //num_threads(N)
  {
#pragma omp master
    {
      // Blocker task: owns the inout dependence on nt for ~2 seconds.
#pragma omp task depend(inout:nt)
      {
        my_sleep(2.0);
      }
      int gtid = __kmpc_global_thread_num(NULL);
      omp_event_handle_t evt;
/*
      #pragma omp task detach(evt)
      {}
*/
      // Manually allocate a detachable task (emulates what the compiler
      // would emit for the commented-out `task detach(evt)` above).
      task = (ptask)__kmpc_omp_task_alloc(NULL,gtid,PTASK_FLAG_DETACHABLE,
                            sizeof(struct task),sizeof(struct shar),&task_entry);
      psh = task->shareds;
      evt = (omp_event_handle_t)__kmpc_task_allow_completion_event(NULL,gtid,task);
      task->evt = evt;
      // Same dependence as the blocker task, length 0 at &nt.
      dep sdep;
      sdep.addr = (size_t)&nt;
      sdep.len = 0L;
      sdep.flags = 3; // NOTE(review): presumably encodes inout -- confirm vs kmp.h
      __kmpc_omp_task_with_deps(NULL,gtid,task,1,&sdep,0,0);
      //__kmpc_omp_task(NULL, gtid, task);
      // Fulfill the allow-completion event before waiting; taskwait must
      // still block until the task body has run.
      omp_fulfill_event(evt);
      #pragma omp taskwait
      ;
      // printf("after tw %d\n", omp_get_thread_num());
    } // end master
  } // end parallel
  // check results: task_entry must have set the flag exactly once.
  if (checker == 1) {
    printf("passed\n");
    return 0;
  } else {
    printf("failed\n");
    return 1;
  }
}
|
dataracetest1.c | int main() {
double a[10];
#pragma omp parallel for
for(int i=1;i<4;i++) {
a[i]=a[i+1];
}
return 0;
}
|
scoping.c | int bar() {
int x = 10;
{
int y = 19 + x;
int z = 11;
for (;;) {
z++;
{
int x = 11;
x++;
}
z = x - 1;
}
}
}
// Scoping fixture: shadows `a` in a nested block inside a while loop
// that runs at most once (break).  NOTE(review): foo() calls bar(),
// which never returns, and foo() itself has no return statement --
// both appear intentional for this analysis test.
int foo() {
  int a = 10 + bar();
  while (1) {
    a = 10;
    {
      int a;    // shadows outer a
      a = 15;
    }
    a--;        // outer a again
    break;
  }
}
// Label/scope fixture: nested labeled blocks, shadowed `x` and `q`
// inside an OpenMP worksharing loop, and updates to shared `y` both
// inside and outside the critical section.  NOTE(review): the
// unprotected write at l11 looks like part of the test's intent;
// foo()/bar() never return, so running this would hang -- presumably a
// static-analysis-only input.
int main() {
  int x = 10;
  int y = 5;
  int q = 11;
l1: l2: {
    int z = 10 + x + foo();
    int i;
l3: l4: i = z + 11 - y - q;
#pragma omp parallel
#pragma omp for
    for (i = 0; i < 100; i++) {
      int x;          // shadows the outer x inside each iteration
      x = 11;
      {
        x = 11;
l5: {
        int q = 10;   // shadows the outer q
        x = 15 + q;
      }
        x++;
l6: {
        int p = 0;
l11: y = y - 1;       // unprotected write to shared y
#pragma omp critical
        {
l10: y += p + q;      // outer q here; protected write to y
        }
        p++;
      }
      }
    }
  }
}
|
5-32t.c | #include <stdio.h>
#include <omp.h>
// Demo: 32 threads sharing an unprotected accumulator.
// NOTE(review): COUNT is not defined in this file -- presumably
// supplied via -DCOUNT=... at compile time; confirm the build command.
// NOTE(review): `sum` is written by all threads without reduction or
// atomic -- this race appears to be the teaching point of the example.
int main()
{
	int i;
	int sum=0;
	omp_set_num_threads(32);
#pragma omp parallel for
	for (i=0; i<COUNT; i++)
	{
		sum = sum + i;
		printf("Thread number: %d  Iteration: %d  Local Sum: %d \n",
			omp_get_thread_num(), i, sum);
	}
	printf("\n All Threads Done – Final Global Sum: %d \n\n", sum);
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2018, Syoyo Fujita
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// Parsed EXR version/flags field (filled by ParseEXRVersionFrom{File,Memory}).
typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;
// A single custom (non-required) EXR header attribute: name, type string,
// and a raw value blob of `size` bytes.
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*; raw attribute payload
  int size;              // byte length of `value`
  int pad0;              // explicit padding for alignment
} EXRAttribute;
// Per-channel metadata from the EXR `channels` attribute.
typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;  // presumably TINYEXR_PIXELTYPE_* -- see EXRHeader::pixel_types
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];  // explicit padding for alignment
} EXRChannelInfo;
// One decoded tile of a tiled EXR image, with its position, mip/rip level,
// and per-channel pixel data.
typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;
  int width;   // actual width in a tile.
  int height;  // actual height in a tile.
  unsigned char **images;  // image[channels][pixels]
} EXRTile;
// Parsed single-part EXR header: required attributes, tiling description,
// custom attributes, and per-channel pixel-type information.
// Initialize with InitEXRHeader(), release with FreeEXRHeader().
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  int chunk_count;
  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;
  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc))
  int num_custom_attributes;
  EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];
  EXRChannelInfo *channels; // [num_channels]
  int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
  // each channel. This is overwritten with `requested_pixel_types` when
  // loading.
  int num_channels;
  int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types; // Filled initially by
  // ParseEXRHeaderFrom(Memory|File), then users
  // can edit it(only valid for HALF pixel type
  // channel)
} EXRHeader;
// Array of headers for a multi-part EXR file (one header per part).
typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;  // [num_headers]
} EXRMultiPartHeader;
// Decoded single-part EXR image. Exactly one of `tiles` (tiled format) or
// `images` (scanline format) is non-NULL.
// Initialize with InitEXRImage(), release with FreeEXRImage().
typedef struct _EXRImage {
  EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
  // from tiles manually. NULL if scanline format.
  unsigned char **images; // image[channels][pixels]. NULL if tiled format.
  int width;
  int height;
  int num_channels;
  // Properties for tile format.
  int num_tiles;
} EXRImage;
// Array of decoded images for a multi-part EXR file (one image per part).
typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;  // [num_images]
} EXRMultiPartImage;
// Decoded deep EXR image (variable number of samples per pixel).
// The application must free `image` and `offset_table` -- see LoadDeepEXR().
typedef struct _DeepImage {
  const char **channel_names;
  float ***image;     // image[channels][scanlines][samples]
  int **offset_table; // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;  // explicit padding for alignment
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Free's internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Free's internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes on success.
// Returns negative value and may set error string in `err` when there's an
// error
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>
#include <limits>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occured in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies (mirrors zlib's Z_DEFAULT_STRATEGY etc.).
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};
// Return status codes (mirrors zlib's Z_OK etc.). MZ_PARAM_ERROR is
// non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,
  MZ_BEST_SPEED = 1,
  MZ_BEST_COMPRESSION = 9,
  MZ_UBER_COMPRESSION = 10,
  MZ_DEFAULT_LEVEL = 6,
  MZ_DEFAULT_COMPRESSION = -1  // maps to MZ_DEFAULT_LEVEL
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct (zlib-compatible z_stream layout;
// the caller updates next_in/avail_in/next_out/avail_out between calls).
typedef struct mz_stream_s {
  const unsigned char *next_in; // pointer to next byte to read
  unsigned int avail_in;        // number of bytes available at next_in
  mz_ulong total_in;            // total number of bytes consumed so far
  unsigned char *next_out;      // pointer to next byte to write
  unsigned int avail_out; // number of bytes that can be written to next_out
  mz_ulong total_out;     // total number of bytes produced so far
  char *msg;                       // error msg (unused)
  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
  mz_alloc_func
      zalloc;         // optional heap allocation function (defaults to malloc)
  mz_free_func zfree; // optional heap free function (defaults to free)
  void *opaque;       // heap alloc function user pointer
  int data_type;      // data_type (unused)
  mz_ulong adler;     // adler32 of the source or uncompressed data
  mz_ulong reserved;  // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
// Per-entry metadata filled in by mz_zip_reader_file_stat().
typedef struct {
  mz_uint32 m_file_index;       // index of this entry in the central directory
  mz_uint32 m_central_dir_ofs;  // byte offset of the entry's central dir record
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;         // ZIP general-purpose bit flag
  mz_uint16 m_method;           // compression method (e.g. MZ_DEFLATED)
#ifndef MINIZ_NO_TIME
  time_t m_time;                // entry timestamp (absent if MINIZ_NO_TIME)
#endif
  mz_uint32 m_crc32;            // CRC-32 of the uncompressed data
  mz_uint64 m_comp_size;        // compressed size in bytes
  mz_uint64 m_uncomp_size;      // uncompressed size in bytes
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs; // offset of the entry's local file header
  mz_uint32 m_comment_size;
  // Fixed-size copies of the entry's name/comment; presumably truncated to
  // fit these buffers — confirm against the reader implementation.
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
// State for one open ZIP archive, shared by the reader and writer APIs.
typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;               // total archive size in bytes
  mz_uint64 m_central_directory_file_ofs; // offset of the central directory
  mz_uint m_total_files;                  // number of entries in the archive
  mz_zip_mode m_zip_mode;                 // current mode (reading/writing/...)
  mz_uint m_file_offset_alignment;
  // Memory-management callbacks; m_pAlloc_opaque is passed back to them.
  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;
  // I/O callbacks; m_pIO_opaque is passed back to them.
  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;
  mz_zip_internal_state *m_pState;        // opaque internal state
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locates a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts a archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts a archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts a archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts a archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts a archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forwardslash with empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// One Huffman decode table: per-symbol code lengths, a fast direct-lookup
// table indexed by up to TINFL_FAST_LOOKUP_BITS input bits, and m_tree for
// codes too long for the fast path (internal to tinfl_decompress()).
typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Complete inflate state for the coroutine-style tinfl_decompress().
// Field semantics are internal; callers only need tinfl_init(), which
// resets the state by zeroing m_state alone.
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;  // bit accumulator (32 or 64 bits wide, see above)
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
// Complete deflate state; initialized by tdefl_init() (no dynamic
// allocation — the entire compressor lives in this one struct).
typedef struct {
  // Optional output callback and its user context (may be NULL; see
  // tdefl_init()).
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  // Saved match/flush state so tdefl_compress() can resume across calls.
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  // Current input/output buffers supplied to tdefl_compress().
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  // LZ dictionary (padded by the max match length), Huffman statistics/code
  // tables, hash chains, and the staging output buffer.
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pBut_buf_func: If NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pBut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
// Computes the Adler-32 checksum of ptr[0..buf_len-1], continuing from the
// running checksum 'adler'. Passing a NULL ptr returns the seed value
// (MZ_ADLER32_INIT) so callers can start a fresh checksum.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 lo = (mz_uint32)(adler & 0xffff), hi = (mz_uint32)(adler >> 16);
  // Process in blocks of at most 5552 bytes so the sums stay within 32 bits,
  // deferring the (expensive) modulo to once per block.
  size_t chunk = buf_len % 5552;
  mz_uint32 k;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    // Unrolled x8 inner loop.
    for (k = 0; k + 7 < chunk; k += 8, ptr += 8) {
      lo += ptr[0]; hi += lo;
      lo += ptr[1]; hi += lo;
      lo += ptr[2]; hi += lo;
      lo += ptr[3]; hi += lo;
      lo += ptr[4]; hi += lo;
      lo += ptr[5]; hi += lo;
      lo += ptr[6]; hi += lo;
      lo += ptr[7]; hi += lo;
    }
    while (k < chunk) {
      lo += *ptr++;
      hi += lo;
      ++k;
    }
    lo %= 65521U;
    hi %= 65521U;
    buf_len -= chunk;
    chunk = 5552;
  }
  return (hi << 16) + lo;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// CRC-32 (reflected polynomial 0xEDB88320) using Karl Malbrain's compact
// 16-entry nibble table. A NULL ptr returns the initial CRC value.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_tab[16] = {
      0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
      0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 c;
  if (!ptr) return MZ_CRC32_INIT;
  c = ~(mz_uint32)crc;
  while (buf_len--) {
    mz_uint8 byte = *ptr++;
    c = (c >> 4) ^ s_tab[(c ^ byte) & 0xF];         // low nibble
    c = (c >> 4) ^ s_tab[(c ^ (byte >> 4)) & 0xF];  // high nibble
  }
  return ~c;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc callback installed when the caller leaves zalloc NULL.
// Mirrors calloc-style (items, size) semantics. Rejects requests whose byte
// count would overflow size_t, so the multiplication can't wrap and silently
// under-allocate (the original multiplied unchecked).
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque;
  if ((size) && (items > ((size_t)-1) / size)) return NULL;  // overflow guard
  return MZ_MALLOC(items * size);
}
// Default zfree callback installed when the caller leaves zfree NULL.
static void def_free_func(void *opaque, void *address) {
  (void)opaque;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
// Convenience wrapper: mz_deflateInit2() with zlib defaults (zlib header,
// default window bits, mem_level 9, default strategy).
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// Initializes pStream for compression. method must be MZ_DEFLATED, mem_level
// must be in [1,9], and window_bits must be +/- MZ_DEFAULT_WINDOW_BITS
// (negative => raw deflate, no zlib header). Installs the default allocator
// callbacks when none are supplied. Returns MZ_OK, MZ_STREAM_ERROR,
// MZ_PARAM_ERROR, or MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pCompressor;
  mz_uint flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || (mem_level < 1) || (mem_level > 9) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pCompressor = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                                    sizeof(tdefl_compressor));
  if (!pCompressor) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pCompressor;
  if (tdefl_init(pCompressor, NULL, NULL, flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);  // releases the compressor we just allocated
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// Resets an initialized compression stream for reuse, keeping the existing
// compressor allocation and its original flags.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pComp = (tdefl_compressor *)pStream->state;
  pStream->total_in = 0;
  pStream->total_out = 0;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// Compresses as much data as possible from next_in to next_out. flush may be
// MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH.
// Returns MZ_OK on progress, MZ_STREAM_END when the stream is complete,
// MZ_BUF_ERROR when no forward progress is possible, MZ_STREAM_ERROR on bad
// parameters or an internal failure.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  // Once the compressor has reported DONE, only MZ_FINISH is acceptable.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  // Remember the totals so we can detect "no forward progress" below.
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // Advance the stream cursors by what tdefl actually consumed/produced.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// Frees the compressor state attached to pStream, if any. Safe to call more
// than once; subsequent calls see a NULL state and do nothing.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Returns a (deliberately over-conservative) upper bound on the compressed
// size of source_len bytes; a tight bound is hard to compute given tdefl's
// blocking scheme. pStream is unused.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong expanded = 128 + (source_len * 110) / 100;
  mz_ulong stored = 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(expanded, stored);
}
// One-shot compression of pSource[0..source_len) into pDest at the given
// level. On entry *pDest_len holds the destination capacity; on success it
// receives the compressed size. Returns MZ_OK or an MZ_* error code.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  mz_stream strm;
  int rc;
  memset(&strm, 0, sizeof(strm));
  // The streaming counters are 32-bit; reject inputs that don't fit in case
  // mz_ulong is 64 bits on this platform.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  rc = mz_deflateInit(&strm, level);
  if (rc != MZ_OK) return rc;
  rc = mz_deflate(&strm, MZ_FINISH);
  if (rc != MZ_STREAM_END) {
    mz_deflateEnd(&strm);
    return (rc == MZ_OK) ? MZ_BUF_ERROR : rc;
  }
  *pDest_len = strm.total_out;
  return mz_deflateEnd(&strm);
}
// One-shot compression at the default level; see mz_compress2().
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// zlib-style alias for mz_deflateBound() (the stream argument is unused).
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Per-stream decompression state, hung off mz_stream.state by
// mz_inflateInit2() and released by mz_inflateEnd().
typedef struct {
  tinfl_decompressor m_decomp;  // low-level inflator state
  // m_dict_ofs/m_dict_avail: write position and pending (not-yet-copied-out)
  // bytes in the dictionary; m_first_call/m_has_flushed: first-call and
  // MZ_FINISH-seen flags.
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;  // > 0 => a zlib header is expected and parsed
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];  // LZ sliding-window dictionary
  tinfl_status m_last_status;  // most recent tinfl_decompress() result
} inflate_state;
// Initializes pStream for decompression. window_bits must be
// +/- MZ_DEFAULT_WINDOW_BITS (negative => raw deflate, no zlib header).
// Installs the default allocator callbacks when none are supplied.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (!pState) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  pState->m_window_bits = window_bits;
  return MZ_OK;
}
// Convenience wrapper: mz_inflateInit2() expecting a zlib header.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// Decompresses as much data as possible from next_in to next_out. flush may
// be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, or MZ_FINISH. Returns
// MZ_OK on progress, MZ_STREAM_END when done, MZ_DATA_ERROR on a corrupt
// stream, MZ_BUF_ERROR when no forward progress is possible, MZ_STREAM_ERROR
// on parameter/usage errors.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  // Once MZ_FINISH has been requested, every later call must also pass it.
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    // Fast path: decompress straight into the caller's buffer, bypassing the
    // internal dictionary entirely.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // First drain any bytes still buffered in the dictionary from a prior call.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    // Decompress into the dictionary, then copy out as much as fits.
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
    // uncompressed data left in the output dictionary -
    // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
    // without supplying more input or by setting flush
    // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// Frees the decompressor state attached to pStream, if any. Safe to call more
// than once; subsequent calls see a NULL state and do nothing.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// One-shot decompression of pSource[0..source_len) into pDest. On entry
// *pDest_len holds the destination capacity; on success it receives the
// decompressed size. Returns MZ_OK or an MZ_* error code.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream strm;
  int rc;
  memset(&strm, 0, sizeof(strm));
  // The streaming counters are 32-bit; reject inputs that don't fit in case
  // mz_ulong is 64 bits on this platform.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  rc = mz_inflateInit(&strm);
  if (rc != MZ_OK) return rc;
  rc = mz_inflate(&strm, MZ_FINISH);
  if (rc != MZ_STREAM_END) {
    mz_inflateEnd(&strm);
    // BUF_ERROR with all input consumed means the stream was truncated.
    return ((rc == MZ_BUF_ERROR) && (!strm.avail_in)) ? MZ_DATA_ERROR : rc;
  }
  *pDest_len = strm.total_out;
  return mz_inflateEnd(&strm);
}
// Maps an MZ_* status code to a short human-readable description, or NULL if
// the code is unknown.
const char *mz_error(int err) {
  static const struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  const mz_uint count = sizeof(s_error_descs) / sizeof(s_error_descs[0]);
  mz_uint i;
  for (i = 0; i < count; ++i) {
    if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
  }
  return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
// Plain memcpy/memset, macro-ized so an embedder can override them.
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// The inflator is a switch-based coroutine: TINFL_CR_BEGIN opens a switch on
// the saved r->m_state; TINFL_CR_RETURN suspends by recording state_index and
// returning 'result', and execution resumes at the matching case label on the
// next call; TINFL_CR_FINISH closes the switch.
#define TINFL_CR_BEGIN  \
  switch (r->m_state) { \
    case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do {                                       \
    status = result;                         \
    r->m_state = state_index;                \
    goto common_exit;                        \
    case state_index:;                       \
  }                                          \
  MZ_MACRO_END
// Same, but the coroutine re-returns 'result' forever on every later call
// (used for terminal success/failure states).
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do {                                               \
    for (;;) {                                       \
      TINFL_CR_RETURN(state_index, result);          \
    }                                                \
  }                                                  \
  MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
// Reads the next input byte into c, suspending the coroutine with
// NEEDS_MORE_INPUT while the caller has indicated more input may come; with
// no more input available it pads with 0 (see the TODO above).
#define TINFL_GET_BYTE(state_index, c)                                 \
  do {                                                                 \
    if (pIn_buf_cur >= pIn_buf_end) {                                  \
      for (;;) {                                                       \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {                \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
          if (pIn_buf_cur < pIn_buf_end) {                             \
            c = *pIn_buf_cur++;                                        \
            break;                                                     \
          }                                                            \
        } else {                                                       \
          c = 0;                                                       \
          break;                                                       \
        }                                                              \
      }                                                                \
    } else                                                             \
      c = *pIn_buf_cur++;                                              \
  }                                                                    \
  MZ_MACRO_END
// Appends whole input bytes to bit_buf until it holds at least n bits.
#define TINFL_NEED_BITS(state_index, n)                \
  do {                                                 \
    mz_uint c;                                         \
    TINFL_GET_BYTE(state_index, c);                    \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);     \
    num_bits += 8;                                     \
  } while (num_bits < (mz_uint)(n))
// Discards the low n bits of bit_buf (refilling first if necessary).
#define TINFL_SKIP_BITS(state_index, n) \
  do {                                  \
    if (num_bits < (mz_uint)(n)) {      \
      TINFL_NEED_BITS(state_index, n);  \
    }                                   \
    bit_buf >>= (n);                    \
    num_bits -= (n);                    \
  }                                     \
  MZ_MACRO_END
// Extracts the low n bits of bit_buf into b (refilling first if necessary).
#define TINFL_GET_BITS(state_index, b, n) \
  do {                                    \
    if (num_bits < (mz_uint)(n)) {        \
      TINFL_NEED_BITS(state_index, n);    \
    }                                     \
    b = bit_buf & ((1 << (n)) - 1);       \
    bit_buf >>= (n);                      \
    num_bits -= (n);                      \
  }                                       \
  MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
// NOTE: unlike the other TINFL_* macros this one is not wrapped in
// MZ_MACRO_END - it expands to a complete do/while statement including its
// own terminating ';' (after 'while (num_bits < 15)').
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                     \
  do {                                                                 \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
    if (temp >= 0) {                                                   \
      code_len = temp >> 9;                                            \
      if ((code_len) && (num_bits >= code_len)) break;                 \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) {                    \
      code_len = TINFL_FAST_LOOKUP_BITS;                               \
      do {                                                             \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while ((temp < 0) && (num_bits >= (code_len + 1)));            \
      if (temp >= 0) break;                                            \
    }                                                                  \
    TINFL_GET_BYTE(state_index, c);                                    \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                     \
    num_bits += 8;                                                     \
  } while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
// Decodes the next Huffman symbol into sym: fast path reads 2 bytes at once
// into the bit buffer and resolves via the direct lookup table; entries with
// codes longer than TINFL_FAST_LOOKUP_BITS fall through to the binary-tree
// walk. The byte-starved slow path (< 2 input bytes left) goes through
// TINFL_HUFF_BITBUF_FILL above.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                             \
  do {                                                                         \
    int temp;                                                                  \
    mz_uint code_len, c;                                                       \
    if (num_bits < 15) {                                                       \
      if ((pIn_buf_end - pIn_buf_cur) < 2) {                                   \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                            \
      } else {                                                                 \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) |           \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8));      \
        pIn_buf_cur += 2;                                                      \
        num_bits += 16;                                                        \
      }                                                                        \
    }                                                                          \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0)                                                                     \
      code_len = temp >> 9, temp &= 511;                                       \
    else {                                                                     \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while (temp < 0);                                                      \
    }                                                                          \
    sym = temp;                                                                \
    bit_buf >>= code_len;                                                      \
    num_bits -= code_len;                                                      \
  }                                                                            \
  MZ_MACRO_END
// Low-level streaming inflator, written as a switch-based coroutine (see the
// TINFL_CR_* macros). Decompresses from *pIn_buf_next into *pOut_buf_next;
// on return *pIn_buf_size/*pOut_buf_size are updated to the bytes actually
// consumed/produced. Call repeatedly (it resumes where it suspended) until
// the returned status is TINFL_STATUS_DONE or negative. Unless
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set, the output buffer is
// treated as a power-of-2-sized circular dictionary.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // Base values and extra-bit counts for the length/distance codes, plus the
  // code-length alphabet permutation, per RFC 1951.
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23, 27, 31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,  0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2,  2,  3,  3,
                                       4, 4, 5, 5, 6, 6, 7,  7,  8,  8,
                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  static const int s_min_table_sizes[3] = {257, 1, 4};
  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;
  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }
  // Restore the coroutine's locals from the persistent state.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN
  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  // Optional 2-byte zlib header: validate the check value, method (8 =
  // deflate), the no-preset-dictionary bit, and the advertised window size.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }
  // Per-block loop: 3-bit header = final flag (bit 0) + block type (bits 1-2).
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: align to a byte boundary, read LEN and
      // its one's complement NLEN, then copy LEN raw bytes to the output.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // Drain any bytes still sitting in the bit buffer first...
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // ...then bulk-copy directly from the input buffer.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      // Block type 3 is reserved/illegal.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Static Huffman block: build the fixed literal/length and distance
        // code lengths defined by RFC 1951.
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i) *p++ = 8;
        for (; i <= 255; ++i) *p++ = 9;
        for (; i <= 279; ++i) *p++ = 7;
        for (; i <= 287; ++i) *p++ = 8;
      } else {
        // Dynamic Huffman block: read HLIT/HDIST/HCLEN, then the code-length
        // code sizes in the dezigzag order.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build the decode tables (fast lookup + overflow tree) for each active
      // Huffman table, from highest index down: code-length table first for
      // dynamic blocks, then literal/length and distance tables.
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        // Canonical Huffman: compute the first code of each length and check
        // the code space is exactly filled (unless the table is degenerate).
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size) continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are stored bit-reversed relative to lookup order.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            // Short code: replicate (length | symbol) into every fast-lookup
            // slot whose low bits match.
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          // Long code: thread it through the binary overflow tree (negative
          // entries are internal node indices).
          if (0 ==
              (tree_cur = pTable->m_look_up[rev_code &
                                            (TINFL_FAST_LOOKUP_SIZE - 1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Dynamic block: decode the run-length-encoded literal/length and
          // distance code sizes using the just-built code-length table
          // (symbols 16/17/18 are repeat codes).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Main symbol decode loop for this block.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Near a buffer boundary: use the careful one-symbol-at-a-time
            // path that can suspend the coroutine.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256) break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            // Fast path: bulk-refill the bit buffer and decode up to two
            // literal symbols inline without bounds checks.
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;
            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        // Symbol 256 = end of block; >256 = length code of an LZ match.
        if ((counter &= 511) == 256) break;
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }
        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }
        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        // A match may not reach before the start of a non-wrapping buffer.
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }
        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);
        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Copy would run off the end of the output: fall back to a
          // byte-at-a-time copy that can suspend for more output space.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes at a time.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1) pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // Generic overlapping-safe copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));
  // After the final block, a zlib stream carries a big-endian adler-32.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH
common_exit:
  // Persist the coroutine locals and report back consumed/produced counts.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  // Incrementally update the running adler-32 of the decompressed data and,
  // when the stream is done, compare it against the stored zlib checksum.
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}
// Higher level helper functions.
// Inflates an entire in-memory stream into a heap buffer that is grown as
// needed. On success returns the buffer (caller releases it with mz_free())
// and stores the decompressed size in *pOut_len; on failure returns NULL
// with *pOut_len set to 0.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor inflator;
  void *pOut = NULL;
  size_t in_ofs = 0, capacity = 0;
  *pOut_len = 0;
  tinfl_init(&inflator);
  for (;;) {
    size_t in_avail = src_buf_len - in_ofs;
    size_t out_avail = capacity - *pOut_len;
    size_t grown;
    void *pTmp;
    tinfl_status status = tinfl_decompress(
        &inflator, (const mz_uint8 *)pSrc_buf + in_ofs, &in_avail,
        (mz_uint8 *)pOut, pOut ? (mz_uint8 *)pOut + *pOut_len : NULL,
        &out_avail,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      // Corrupt stream, or it wants input we don't have: fail.
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    in_ofs += in_avail;
    *pOut_len += out_avail;
    if (status == TINFL_STATUS_DONE) return pOut;
    // Needs more output room: double the buffer (128-byte minimum).
    grown = capacity * 2;
    if (grown < 128) grown = 128;
    pTmp = MZ_REALLOC(pOut, grown);
    if (!pTmp) {
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    pOut = pTmp;
    capacity = grown;
  }
}
// Inflates an in-memory stream into a caller-provided buffer in a single
// call. Returns the number of decompressed bytes, or
// TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on any error.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor inflator;
  tinfl_status rc;
  tinfl_init(&inflator);
  rc = tinfl_decompress(&inflator, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                        (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf,
                        &out_buf_len,
                        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  if (rc != TINFL_STATUS_DONE) return TINFL_DECOMPRESS_MEM_TO_MEM_FAILED;
  return out_buf_len;
}
// Inflates the input by repeatedly filling a TINFL_LZ_DICT_SIZE dictionary
// and handing each produced chunk to pPut_buf_func. Returns 1 on success, 0
// on failure (including a callback that returns 0); on return *pIn_buf_size
// holds the number of input bytes consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tinfl_decompressor inflator;
  int succeeded = 0;
  size_t in_ofs = 0, dict_ofs = 0;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&inflator);
  for (;;) {
    size_t in_avail = *pIn_buf_size - in_ofs;
    size_t out_avail = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status = tinfl_decompress(
        &inflator, (const mz_uint8 *)pIn_buf + in_ofs, &in_avail, pDict,
        pDict + dict_ofs, &out_avail,
        (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                   TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_ofs += in_avail;
    // Hand the newly produced bytes to the callback; a zero return aborts.
    if ((out_avail) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)out_avail, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      succeeded = (status == TINFL_STATUS_DONE);
      break;
    }
    dict_ofs = (dict_ofs + out_avail) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_ofs;
  return succeeded;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
// Deflate length symbol (257..285, RFC 1951 3.2.5) for each stored length
// code 0..255 (i.e. match length minus TDEFL_MIN_MATCH_LEN).
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    285};
// Number of extra bits carried by each stored length code 0..255
// (companion to s_tdefl_len_sym; see RFC 1951 3.2.5).
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
// Deflate distance symbol for small distances; indexed by (dist - 1) & 511
// at the call sites, i.e. used when the (biased) match distance is < 512.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
// Extra-bit counts for the small-distance symbols above (same indexing).
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
// Deflate distance symbol for large distances; indexed by dist >> 8 at the
// call sites, i.e. used when the (biased) match distance is >= 512.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
    24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
// Extra-bit counts for the large-distance symbols above (same indexing).
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// m_key holds the symbol's frequency on input to the Huffman routines and
// is reused for its code length on output; m_sym_index is the symbol value.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
// Radix sorts a tdefl_sym_freq[] array on its 16-bit m_key, one byte per
// pass (LSB first), ping-ponging between pSyms0 and pSyms1. Returns a
// pointer to whichever buffer ends up holding the sorted result.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 passes = 2, shift, pass, i, hist[256 * 2];
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;
  // Build both byte histograms (low byte, high byte) in one scan.
  MZ_CLEAR_OBJ(hist);
  for (i = 0; i < num_syms; i++) {
    mz_uint key = pSyms0[i].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  // If every key shares the same high byte (all counts in one bucket of the
  // second histogram), the second pass would be a no-op — skip it.
  while ((passes > 1) && (num_syms == hist[(passes - 1) * 256])) passes--;
  for (shift = 0, pass = 0; pass < passes; pass++, shift += 8) {
    const mz_uint32 *pCounts = &hist[pass << 8];
    mz_uint offsets[256], ofs = 0;
    tdefl_sym_freq *pTmp;
    // Exclusive prefix sum turns bucket counts into start offsets.
    for (i = 0; i < 256; i++) {
      offsets[i] = ofs;
      ofs += pCounts[i];
    }
    // Stable scatter into the destination buffer, then swap buffers.
    for (i = 0; i < num_syms; i++)
      pDst[offsets[(pSrc[i].m_key >> shift) & 0xFF]++] = pSrc[i];
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// Computes minimum-redundancy (Huffman) code lengths in place: on entry
// A[0..n-1].m_key holds symbol frequencies sorted in ascending order; on
// exit A[i].m_key holds the code length (depth) assigned to that symbol.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place. A[next].m_key temporarily
  // holds internal-node weights; merged entries are overwritten with the
  // index of their parent node.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent indices into node depths, walking from the
  // tree's root (stored at n-2) downward.
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: derive leaf (symbol) depths from the internal-node depths.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Adjusts a histogram of code lengths (pNum_codes[1..]) so no code exceeds
// max_code_size while the Kraft equality still holds, i.e. the code space
// stays exactly filled. Over-long codes are first folded into the deepest
// allowed level, then the resulting over-subscription is repaired.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int bit_len;
  mz_uint32 kraft_total = 0;
  // Nothing to enforce for zero or one code.
  if (code_list_len <= 1) return;
  // Fold every over-long code into the max_code_size bucket.
  for (bit_len = max_code_size + 1;
       bit_len <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; bit_len++)
    pNum_codes[max_code_size] += pNum_codes[bit_len];
  // Kraft sum, scaled so that a perfectly full code space equals
  // 1 << max_code_size.
  for (bit_len = max_code_size; bit_len > 0; bit_len--)
    kraft_total +=
        (((mz_uint32)pNum_codes[bit_len]) << (max_code_size - bit_len));
  // While over-subscribed: remove one code from the deepest level and demote
  // a shallower code (one code of length L becomes two of length L+1).
  // Each step frees exactly one unit of scaled code space.
  while (kraft_total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (bit_len = max_code_size - 1; bit_len > 0; bit_len--)
      if (pNum_codes[bit_len]) {
        pNum_codes[bit_len]--;
        pNum_codes[bit_len + 1] += 2;
        break;
      }
    kraft_total--;
  }
}
// Builds the canonical Huffman code for table 'table_num'. When static_table
// is true the code sizes already in m_huff_code_sizes are used as-is;
// otherwise sizes are derived from the symbol frequencies in m_huff_count
// (radix sort + minimum-redundancy + max-code-size enforcement). In both
// cases the bit-reversed canonical codes are written to m_huff_codes.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only the symbols that actually occur.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    // Histogram the resulting code lengths, then clamp to code_size_limit.
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Assign lengths: shortest codes go to the most frequent symbols
    // (pSyms is sorted by ascending frequency and consumed from the end).
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Standard canonical-code construction (RFC 1951 3.2.2).
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  // Deflate emits Huffman codes MSB-first from an LSB-first bit buffer, so
  // each code is stored bit-reversed.
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends the low 'l' bits of 'b' to the output bit stream; whole bytes are
// flushed to m_pOutput_buf as they complete. Writes past m_pOutput_buf_end
// are dropped (callers detect overflow by comparing the two pointers).
#define TDEFL_PUT_BITS(b, l)                                 \
  do {                                                       \
    mz_uint bits = b;                                        \
    mz_uint len = l;                                         \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                   \
    d->m_bit_buffer |= (bits << d->m_bits_in);               \
    d->m_bits_in += len;                                     \
    while (d->m_bits_in >= 8) {                              \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)           \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);   \
      d->m_bit_buffer >>= 8;                                 \
      d->m_bits_in -= 8;                                     \
    }                                                        \
  }                                                          \
  MZ_MACRO_END
// Flushes a pending run of 'rle_repeat_count' repeats of prev_code_size into
// packed_code_sizes[]: runs shorter than 3 are emitted literally; longer
// runs use meta-symbol 16 ("repeat previous code length 3-6 times") followed
// by the biased repeat count. Frequencies go into m_huff_count[2] so the
// code-length code can be optimized afterwards.
#define TDEFL_RLE_PREV_CODE_SIZE()                                    \
  {                                                                   \
    if (rle_repeat_count) {                                           \
      if (rle_repeat_count < 3) {                                     \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(             \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);   \
        while (rle_repeat_count--)                                    \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
      } else {                                                        \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;              \
        packed_code_sizes[num_packed_code_sizes++] =                  \
            (mz_uint8)(rle_repeat_count - 3);                         \
      }                                                               \
      rle_repeat_count = 0;                                           \
    }                                                                 \
  }
// Flushes a pending run of 'rle_z_count' zero code sizes: runs shorter than
// 3 are emitted literally; runs of 3-10 use meta-symbol 17 and runs of
// 11-138 use meta-symbol 18, each followed by the biased run length.
// Frequencies go into m_huff_count[2].
#define TDEFL_RLE_ZERO_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_z_count) {                                                    \
      if (rle_z_count < 3) {                                              \
        d->m_huff_count[2][0] =                                           \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);             \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                     \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 17;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 3);                                  \
      } else {                                                            \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 18;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 11);                                 \
      }                                                                   \
      rle_z_count = 0;                                                    \
    }                                                                     \
  }
// Transmission order of the code-length-code sizes (RFC 1951 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
// Writes the header of a dynamic-Huffman deflate block (RFC 1951 3.2.7):
// optimizes the lit/len and distance tables from the gathered frequencies,
// RLE-packs their concatenated code-size lists with meta-symbols 16-18,
// builds and emits the code-length code, then emits the packed lists.
// tdefl_compress_lz_codes() must be called afterwards to emit the codes.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) is always emitted exactly once per block.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes (header minimums: 257 lit/len, 1 distance).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // RLE-pack the concatenated code-size lists, accumulating meta-symbol
  // frequencies so the code-length code can be optimized below.
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      // 138 is the longest run meta-symbol 18 can encode.
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        // 6 is the longest run meta-symbol 16 can encode.
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run type is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // Block type 10 = compressed with dynamic Huffman codes.
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // Count how many code-length-code sizes need transmitting (minimum 4),
  // scanning in the fixed transmission order.
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the packed sizes; symbols 16/17/18 carry 2/3/7 extra bits.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Writes the header of a static-Huffman deflate block and loads the fixed
// code lengths from RFC 1951 3.2.6 so the codes can be emitted afterwards.
static void tdefl_start_static_block(tdefl_compressor *d) {
  // Fixed lit/len code lengths: symbols 0-143 -> 8 bits, 144-255 -> 9,
  // 256-279 -> 7, 280-287 -> 8.
  mz_uint8 *sizes = &d->m_huff_code_sizes[0][0];
  memset(sizes, 8, 144);
  memset(sizes + 144, 9, 256 - 144);
  memset(sizes + 256, 7, 280 - 256);
  memset(sizes + 280, 8, 288 - 280);
  // All 32 distance codes use 5 bits.
  memset(d->m_huff_code_sizes[1], 5, 32);
  // Build canonical codes directly from the fixed lengths.
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // Block type 01 = compressed with fixed Huffman codes.
  TDEFL_PUT_BITS(1, 2);
}
// mz_bitmasks[n] has the low n bits set, for n = 0..16.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
// Fast path for emitting the buffered LZ codes: requires little-endian,
// unaligned loads/stores and 64-bit registers. Bits are accumulated in a
// local 64-bit buffer and flushed via one unaligned 64-bit store per
// iteration. Returns MZ_FALSE if the output buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
// Accumulate bits locally without flushing; bytes are flushed in bulk below.
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }
  // 'flags' delivers one control bit per LZ code (1 = match, 0 = literal);
  // the 0x100 sentinel marks when all 8 bits of a flag byte are consumed.
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1-byte length code followed by a 2-byte distance.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal; opportunistically emit up to two more literals while the
      // next flag bits are also 0, amortizing the per-iteration flush.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Flush: store 8 bytes unaligned, then keep only the leftover bits.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  // Write back state and drain remaining bits through the checked macro.
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path for emitting the buffered LZ codes; produces the same bit
// stream as the fast path but flushes byte-at-a-time via TDEFL_PUT_BITS.
// Returns MZ_FALSE if the output buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  // 'flags' delivers one control bit per LZ code (1 = match, 0 = literal);
  // the 0x100 sentinel marks when all 8 bits of a flag byte are consumed.
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1-byte length code followed by a 2-byte LE distance.
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Emits one complete deflate block: writes the block header and code trees
// (static or dynamic), then the buffered LZ codes. Returns MZ_FALSE if the
// output buffer overflowed.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  void (*start_block)(tdefl_compressor *) =
      static_block ? tdefl_start_static_block : tdefl_start_dynamic_block;
  start_block(d);
  return tdefl_compress_lz_codes(d);
}
// Finishes the current block and moves it to the caller: optionally writes
// the zlib header, tries a compressed block (static or dynamic), falls back
// to a stored (raw) block if compression would expand the data, handles the
// final-flush trailer (padding + adler32), then resets the per-block LZ
// state and delivers the output via the user callback or output buffer.
// Returns the number of output bytes still pending delivery, or
// TDEFL_STATUS_PUT_BUF_FAILED if the user callback rejects data.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      use_raw_block =
          ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
          (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Write straight into the caller's buffer when it has room for a whole
  // block; otherwise stage in the internal buffer and copy out at the end.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Finalize the partially-filled LZ flag byte; drop it entirely if it
  // holds no flags (all 8 still unused).
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  // zlib stream header (0x78 0x01) before the very first block.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Snapshot the bit-stream state so we can rewind if compression expands.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d,
                             (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                 (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Stored block: type 00, byte-align, then LEN and NLEN (one's
    // complement) via the XOR in the loop increment.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // Emit the raw bytes straight out of the dictionary ring buffer.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Byte-align, then append the big-endian adler32 trailer for zlib.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Non-final flush: append an empty stored block so the decompressor
      // can reach a byte boundary (sync flush).
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block symbol frequencies and LZ code buffer state.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver the produced bytes: via callback, or copy from the internal
  // staging buffer (tracking any overflow in m_output_flush_remaining), or
  // simply advance the offset when we wrote in place.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
// Reads a (potentially unaligned) 16-bit word; only valid on targets where
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES is enabled.
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Finds the longest match for the data at lookahead_pos by walking the hash
// chain in d->m_next. On entry *pMatch_len holds the length to beat; when a
// longer match is found, *pMatch_dist/*pMatch_len are updated. This variant
// compares two bytes at a time via unaligned 16-bit loads.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01: the two bytes straddling the end of the current best match — a
  // cheap rejection filter; s01: first two bytes of the lookahead.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Follow one hash-chain link; stop if the chain ends or the distance
// exceeds max_dist; fast-reject candidates whose bytes at the current
// match end don't equal c01.
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    // Compare up to four 16-bit words per iteration (probe_len counts
    // iterations, not bytes).
    p = s;
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison budget without a mismatch: maximal match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      // New best: refresh the end-of-match rejection word.
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable variant of tdefl_find_match: same contract as the unaligned
// version above, but compares byte-at-a-time so it works on any target.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // c0/c1: the bytes just past and at the end of the current best match,
  // used as a cheap rejection filter on candidates.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Follow one hash-chain link; stop if the chain ends or the distance
// exceeds max_dist; fast-reject candidates that can't beat the current
// best match (filter on the two bytes at its end).
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if ((d->m_dict[probe_pos + match_len] == c0) && \
      (d->m_dict[probe_pos + match_len - 1] == c1)) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    // Full byte-wise comparison of the candidate against the lookahead.
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      // New best: refresh the end-of-match rejection bytes.
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Append a single literal byte to the LZ code stream. A zero bit is shifted
// into the current flags byte (0 = literal, 1 = match) and the literal's
// frequency is counted for the later Huffman pass.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes += 1;
  *d->m_pLZ_code_buf = lit;
  d->m_pLZ_code_buf += 1;
  *d->m_pLZ_flags >>= 1;
  d->m_num_flags_left -= 1;
  if (d->m_num_flags_left == 0) {
    // Current flags byte is full: claim the next code-buffer byte for flags.
    d->m_pLZ_flags = d->m_pLZ_code_buf;
    d->m_pLZ_code_buf += 1;
    d->m_num_flags_left = 8;
  }
  d->m_huff_count[0][lit] += 1;
}
// Emit an LZ match into the code buffer as a 3-byte record:
// [match_len - TDEFL_MIN_MATCH_LEN][dist low byte][dist high byte], shifting
// a 1 bit into the current flags byte to mark it as a match. Also bumps the
// length/distance symbol frequencies used by the Huffman coder.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1; // distances are stored biased by 1
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++; // start a fresh flags byte
  }
  // Distance symbol: small table covers dist < 512, large table the rest.
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
// General-purpose compression path: feeds input into the sliding dictionary
// and hash chains, then runs a greedy or one-step-lazy parse, recording
// literals/matches and flushing deflate blocks as the LZ code buffer fills.
// Returns MZ_FALSE only when tdefl_flush_block() reports an error.
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      // Enough context for 2-byte rolling hash seeding: bulk-copy input and
      // thread each position onto its hash chain.
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the first TDEFL_MAX_MATCH_LEN-1 bytes past the dictionary
        // end so match comparisons never need to wrap.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Cold start: add bytes one at a time until the rolling hash has
      // enough preceding bytes to be seeded.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    // Without a flush we only parse once the lookahead is completely full.
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE mode: only matches against the immediately preceding byte
      // (distance 1) are considered; raw mode emits literals only.
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Reject barely-profitable matches: min-length matches at far distances
    // cost more bits than literals, and FILTER_MATCHES drops short ones.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy step: a match was deferred at the previous position; keep
      // whichever of (previous match, current match) is longer.
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Defer this match one position to see if the next one is better.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
// Report back how much input was consumed, then copy as many pending
// compressed bytes as will fit from the internal output buffer into the
// caller's output buffer. DONE only once the stream is finished AND fully
// drained; otherwise OKAY.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size != NULL)
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  if (d->m_pOut_buf_size != NULL) {
    size_t room = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    size_t copy_len = MZ_MIN(room, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, copy_len);
    d->m_output_flush_ofs += (mz_uint)copy_len;
    d->m_output_flush_remaining -= (mz_uint)copy_len;
    d->m_out_buf_ofs += copy_len;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && (d->m_output_flush_remaining == 0))
    return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
// Main streaming compression entry point. Consumes up to *pIn_buf_size bytes
// from pIn_buf and produces up to *pOut_buf_size bytes into pOut_buf (or
// routes output through the put-buf callback registered at init). On return
// both size parameters hold the bytes actually consumed/produced. Returns
// TDEFL_STATUS_DONE when the stream is complete, TDEFL_STATUS_OKAY while
// more work remains, or a negative status on error.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Validate: exactly one output mechanism (callback XOR caller buffer),
  // previous call must not have failed, a FINISH cannot be rescinded, and
  // non-zero sizes require non-NULL buffers.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain compressed output still pending from a previous call first.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // The specialized fast path only supports greedy parsing with one probe
  // and no filter/raw/RLE modes.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  // Keep the adler-32 of the uncompressed data current when writing a zlib
  // stream (or when explicitly requested).
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed and nothing pending: emit the final/flush block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // A full flush makes the following data independently decompressible.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
// Convenience wrapper around tdefl_compress() for callers that registered a
// put-buf callback at init time: all output goes through the callback, so no
// output buffer is supplied here.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  size_t remaining = in_buf_size;
  MZ_ASSERT(d->m_pPut_buf_func); // only valid in callback-output mode
  return tdefl_compress(d, pIn_buf, &remaining, NULL, NULL, flush);
}
// Initialize (or reset) a compressor. pPut_buf_func may be NULL when the
// caller will drive tdefl_compress() with explicit output buffers. flags is
// a bitwise OR of TDEFL_* flags with the match-probe budget in the low 12
// bits. Always returns TDEFL_STATUS_OKAY.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  // Per-pass probe budgets for the match finder, derived from the low 12 bits.
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Skipping the hash clear speeds up re-init but makes output depend on
  // stale table contents (hence "nondeterministic").
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // Byte 0 of the LZ code buffer is reserved for the first flags byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1; // adler-32 initial value per RFC 1950
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  // Reset literal/length and distance symbol frequency tables.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
// Returns the status produced by the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
// Returns the running adler-32 of the source data compressed so far.
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
// Compress an entire in-memory buffer in one shot, streaming the compressed
// bytes to the caller-supplied callback. Returns MZ_FALSE on bad parameters,
// allocation failure, or a compression error.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool ok = MZ_FALSE;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  // The compressor state is large, so it lives on the heap.
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (pComp == NULL) return MZ_FALSE;
  if (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
      TDEFL_STATUS_OKAY) {
    ok = (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  }
  MZ_FREE(pComp);
  return ok;
}
// Growable byte sink used by the tdefl_*_mem_* helpers.
typedef struct {
  size_t m_size, m_capacity; // bytes written so far / allocated capacity
  mz_uint8 *m_pBuf;          // backing storage (heap or caller-provided)
  mz_bool m_expandable;      // MZ_TRUE: may realloc when capacity is exceeded
} tdefl_output_buffer;
// tdefl put-buf callback that appends len bytes to a tdefl_output_buffer,
// doubling the backing allocation (minimum 128 bytes) when the buffer is
// expandable. Returns MZ_FALSE when growth is needed but not permitted, or
// on allocation failure.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *pOut = (tdefl_output_buffer *)pUser;
  size_t required = pOut->m_size + len;
  if (required > pOut->m_capacity) {
    size_t grown_cap = pOut->m_capacity;
    mz_uint8 *pGrown;
    if (!pOut->m_expandable) return MZ_FALSE;
    // Double until the new data fits (required > grown_cap holds on entry,
    // so this runs at least once, matching the original do/while).
    while (required > grown_cap) grown_cap = MZ_MAX(128U, grown_cap << 1U);
    pGrown = (mz_uint8 *)MZ_REALLOC(pOut->m_pBuf, grown_cap);
    if (pGrown == NULL) return MZ_FALSE;
    pOut->m_pBuf = pGrown;
    pOut->m_capacity = grown_cap;
  }
  memcpy((mz_uint8 *)pOut->m_pBuf + pOut->m_size, pBuf, len);
  pOut->m_size = required;
  return MZ_TRUE;
}
// Compress a memory buffer into a newly heap-allocated buffer. On success
// returns the buffer (caller frees with MZ_FREE) and stores its length in
// *pOut_len; returns NULL/MZ_FALSE on failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer sink;
  MZ_CLEAR_OBJ(sink);
  if (!pOut_len) return MZ_FALSE;
  *pOut_len = 0;
  sink.m_expandable = MZ_TRUE; // let the putter grow the heap buffer
  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                    tdefl_output_buffer_putter, &sink, flags))
    return NULL;
  *pOut_len = sink.m_size;
  return sink.m_pBuf;
}
// Compress a memory buffer into a fixed-size caller-provided buffer.
// Returns the number of compressed bytes written, or 0 on failure
// (including insufficient room: the sink is non-expandable).
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer sink;
  MZ_CLEAR_OBJ(sink);
  if (pOut_buf == NULL) return 0;
  sink.m_pBuf = (mz_uint8 *)pOut_buf;
  sink.m_capacity = out_buf_len; // m_expandable stays MZ_FALSE (zeroed)
  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                    tdefl_output_buffer_putter, &sink, flags))
    return 0;
  return sink.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
// Match-finder probe counts per compression level 0..10 (10 is the hidden
// extra-effort level).
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
// Translate zlib-style (level, window_bits, strategy) parameters into a
// TDEFL_* flag word suitable for tdefl_init(). A negative level selects
// MZ_DEFAULT_LEVEL; positive window_bits requests a zlib header.
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL];
  if (level <= 3) comp_flags |= TDEFL_GREEDY_PARSING_FLAG;
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level) {
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; // level 0 = stored blocks only
  } else if (strategy == MZ_FILTERED) {
    comp_flags |= TDEFL_FILTER_MATCHES;
  } else if (strategy == MZ_HUFFMAN_ONLY) {
    comp_flags &= ~TDEFL_MAX_PROBES_MASK; // zero probes: no LZ matching
  } else if (strategy == MZ_FIXED) {
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  } else if (strategy == MZ_RLE) {
    comp_flags |= TDEFL_RLE_MATCHES;
  }
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
// Encode a raw image (w x h pixels, num_chans bytes per pixel, rows tightly
// packed) as a complete PNG file in a heap-allocated buffer. Returns the
// buffer (free with MZ_FREE) and stores its size in *pLen_out, or NULL on
// failure. level selects compression effort 0..10; flip writes rows
// bottom-up. Layout: 8-byte signature + 25-byte IHDR + IDAT header (41 bytes
// total) | zlib data | IDAT CRC + IEND (16 bytes).
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
      0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z; // bpl = bytes per row (no filter byte)
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp) return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header: reserve 41 bytes, overwritten with the real
  // signature + IHDR + IDAT header once the compressed size is known.
  for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data; each row is preceded by one 0 byte = PNG filter
  // type "none" (z is 0 here since the loop above counted down to zero).
  tdefl_init(
      pComp, tdefl_output_buffer_putter, &out_buf,
      s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header; *pLen_out is temporarily the IDAT payload length.
  *pLen_out = out_buf.m_size - 41;
  {
    // Color type per channel count: 1->gray(0), 2->gray+alpha would be 4,
    // 3->truecolor(2), 4->truecolor+alpha(6).
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89,
                           0x50,
                           0x4e,
                           0x47,
                           0x0d,
                           0x0a,
                           0x1a,
                           0x0a,
                           0x00,
                           0x00,
                           0x00,
                           0x0d,
                           0x49,
                           0x48,
                           0x44,
                           0x52,
                           0,
                           0,
                           (mz_uint8)(w >> 8),
                           (mz_uint8)w,
                           0,
                           0,
                           (mz_uint8)(h >> 8),
                           (mz_uint8)h,
                           8,
                           chans[num_chans],
                           0,
                           0,
                           0,
                           0,
                           0,
                           0,
                           0,
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8),
                           (mz_uint8)*pLen_out,
                           0x49,
                           0x44,
                           0x41,
                           0x54};
    // IHDR CRC covers the chunk type + 13 data bytes (offsets 12..28);
    // stored big-endian at offset 29 by shifting the top byte out each pass.
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // IDAT CRC covers the "IDAT" tag (4 bytes before the data) + payload.
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
// Convenience wrapper using compression level 6 and no vertical flip.
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES / MZ_DEFAULT_LEVEL, but
// MZ_DEFAULT_LEVEL may be unavailable when the zlib APIs are compiled out,
// so the value is hard-coded.
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  const mz_uint default_level = 6;
  return tdefl_write_image_to_png_file_in_memory_ex(
      pImage, w, h, num_chans, pLen_out, default_level, MZ_FALSE);
}
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
// fopen() shim for MSVC/MinGW64: uses the "secure" fopen_s() variant and
// returns NULL on failure (fopen_s leaves pFile NULL on error).
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
// freopen() shim for MSVC/MinGW64 built on freopen_s(); returns NULL when
// freopen_s reports an error (non-zero errno_t).
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, // "PK\x05\x06"
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,        // "PK\x01\x02"
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,          // "PK\x03\x04"
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets (bytes from record start; all
  // multi-byte fields are little-endian)
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
// Tiny growable array used by the ZIP reader/writer internal state.
typedef struct {
  void *m_p;                 // element storage, allocated via the archive's allocator
  size_t m_size, m_capacity; // element count / allocated element capacity
  mz_uint m_element_size;    // size of one element in bytes
} mz_zip_array;
// Private per-archive state hanging off mz_zip_archive::m_pState.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;         // raw central directory bytes
  mz_zip_array m_central_dir_offsets; // offset of each entry in m_central_dir
  mz_zip_array m_sorted_central_dir_offsets; // entry indices sorted by filename
  MZ_FILE *m_pFile;   // backing file when reading/writing via stdio
  void *m_pMem;       // backing memory block when operating in-memory
  size_t m_mem_size;
  size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
// Release the array's storage through the archive's free callback and reset
// all fields to zero so the array can be reused from scratch.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  pArray->m_p = NULL;
  pArray->m_size = 0;
  pArray->m_capacity = 0;
  pArray->m_element_size = 0;
}
// Grow the array's allocation to hold at least min_new_capacity elements.
// When "growing" is set, the capacity doubles from its current value so a
// sequence of appends is amortized O(1); otherwise it is sized exactly.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pGrown;
  size_t cap = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (min_new_capacity <= pArray->m_capacity) return MZ_TRUE;
  if (growing) {
    cap = MZ_MAX(1, pArray->m_capacity);
    while (cap < min_new_capacity) cap <<= 1;
  }
  pGrown = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                            pArray->m_element_size, cap);
  if (pGrown == NULL) return MZ_FALSE;
  pArray->m_p = pGrown;
  pArray->m_capacity = cap;
  return MZ_TRUE;
}
// Ensure capacity for new_capacity elements without changing m_size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity <= pArray->m_capacity) return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
// Set the array's logical size, growing the allocation first if needed.
// Newly exposed elements are NOT initialized.
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
                                                  mz_zip_array *pArray,
                                                  size_t new_size,
                                                  mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
// Reserve room for n more elements beyond the current size (geometric growth).
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  size_t needed = pArray->m_size + n;
  return mz_zip_array_reserve(pZip, pArray, needed, MZ_TRUE);
}
// Append n elements (copied from pElements) to the end of the array.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t old_size = pArray->m_size;
  mz_uint8 *pDst;
  if (!mz_zip_array_resize(pZip, pArray, old_size + n, MZ_TRUE))
    return MZ_FALSE;
  pDst = (mz_uint8 *)pArray->m_p + old_size * pArray->m_element_size;
  memcpy(pDst, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Decode an MS-DOS packed date/time pair into a local-time time_t.
// DOS date: bits 15-9 = years since 1980, 8-5 = month (1-12), 4-0 = day.
// DOS time: bits 15-11 = hour, 10-5 = minute, 4-0 = seconds / 2.
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  int year = ((dos_date >> 9) & 0x7F) + 1980;
  int month = (dos_date >> 5) & 0x0F;
  int day = dos_date & 0x1F;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1; // let mktime() decide whether DST applies
  tm.tm_year = year - 1900;
  tm.tm_mon = month - 1;
  tm.tm_mday = day;
  tm.tm_hour = (dos_time >> 11) & 0x1F;
  tm.tm_min = (dos_time >> 5) & 0x3F;
  tm.tm_sec = (dos_time & 0x1F) * 2; // stored with 2-second granularity
  return mktime(&tm);
}
// Encode a time_t (interpreted in local time) into MS-DOS packed time/date
// words. On MSVC the reentrant localtime_s() is used and both outputs are
// zeroed on failure.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  // NOTE(review): localtime() can return NULL for out-of-range times; this
  // path dereferences the result unconditionally — confirm inputs are sane.
  struct tm *tm = localtime(&time);
#endif
  // time: hour<<11 | min<<5 | sec/2; date: (year-1980)<<9 | month<<5 | day.
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Fetch a file's modification time via stat() and convert it to DOS packed
// time/date. With MINIZ_NO_TIME both outputs are simply zeroed. Returns
// MZ_FALSE only when the stat call itself fails.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Apply access/modification timestamps to a file with utime().
// Returns nonzero (true) on success, zero on failure.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf times;
  int rc;
  times.actime = access_time;
  times.modtime = modified_time;
  rc = utime(pFilename, &times);
  return (rc == 0);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
// Shared setup for all mz_zip_reader_init_* variants: installs default
// allocator callbacks where none were provided, switches the archive into
// reading mode, and allocates + zeroes the internal state (central dir
// arrays get their element sizes set here). Fails if pZip is NULL or the
// archive is already in use.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Case-insensitive "less than" comparison of the filenames of two central
// directory entries (identified by index), used as the ordering predicate
// for the filename heap sort. Filenames start immediately after the fixed
// 46-byte central directory header.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; // skip to the filename bytes
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Walk the common prefix until the first lowercased mismatch.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Identical prefix: the shorter name sorts first; otherwise compare the
  // mismatching characters.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}
// Swap two mz_uint32 lvalues in place (classic temporary-variable swap,
// wrapped in do/while-style so it behaves as a single statement).
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
// In-place heapsort of the m_sorted_central_dir_offsets index array, ordered
// by lowercased filename via mz_zip_reader_filename_less(). Only the index
// array is permuted; the central directory data itself is untouched.
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
// Phase 1: build a max-heap by sifting down from the last internal node.
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
// Pick the larger of the two children (if a right child exists).
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
// Phase 2: repeatedly move the heap maximum to the end and re-sift.
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
// Locate, read, validate, and index the archive's central directory.
// Populates m_total_files, m_central_directory_file_ofs, and the internal
// state's central-dir arrays. Returns MZ_FALSE on any malformed, truncated,
// multi-disk, or zip64 input. Note zip64 entries (0xFFFFFFFF size markers)
// are explicitly rejected here.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
// 4 KB stack scratch buffer, mz_uint32-aligned so LE reads are safe.
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject files which are too small, and check the first
// 4 bytes of the file to make sure a local header is there.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
// Scan this window backwards for the EOCD signature.
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
// Give up at the start of the file, or once we've scanned further back than
// the maximum possible EOCD record (64 KB comment + fixed header).
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
// Step back one window, overlapping by 3 bytes so a signature straddling
// the window boundary is still found.
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Total entry count must match this-disk entry count (no spanning).
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
// Only single-disk archives are supported (disk numbers all 0, or the
// degenerate "both 1" form some writers emit).
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
// The declared directory size must at least cover the fixed headers.
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
// Record this entry's byte offset within the in-memory directory.
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
// Method 0 (stored) must have equal sizes; 0xFFFFFFFF marks zip64.
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
// The local header plus compressed data must fit inside the archive.
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  // Initialize a reader whose read callback (m_pRead) was supplied by the
  // caller, then load and index the central directory. On a parse failure
  // the partially-initialized state is torn down before returning.
  if (!pZip || !pZip->m_pRead) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  // Read callback for archives held in memory: copy up to n bytes starting
  // at file_ofs, clamped to the archive bounds. Returns the byte count
  // actually copied (0 when file_ofs is past the end).
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t bytes_to_copy;
  if (file_ofs >= pZip->m_archive_size)
    bytes_to_copy = 0;
  else
    bytes_to_copy = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs,
         bytes_to_copy);
  return bytes_to_copy;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  // Open a zip archive that lives entirely in a caller-owned memory block.
  // The block is not copied: pMem must stay valid for the reader's lifetime.
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Central directory parsing failed: release everything we set up.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// Read callback for archives backed by a stdio FILE*. Seeks only when the
// current file position differs from the requested offset, then reads up to
// n bytes. Returns 0 on a bad offset or failed seek.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
// Reject offsets that overflow into negative mz_int64 territory; skip the
// seek entirely if the stream is already positioned correctly.
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  // Open pFilename for binary reading, determine its size via seek-to-end,
  // and initialize the reader over it. Returns MZ_FALSE (with the file
  // closed) on any failure.
  mz_int64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  // Bug fix: the original ignored an MZ_FTELL64 failure (-1), which would
  // have been stored as a huge unsigned archive size.
  file_size = MZ_FTELL64(pFile);
  if (file_size < 0) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = (mz_uint64)file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    // mz_zip_reader_end() closes pFile via the state it now owns.
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  // Number of entries in the archive; 0 for a NULL archive pointer.
  if (!pZip) return 0;
  return pZip->m_total_files;
}
// Return a pointer to the raw central directory header bytes for file_index,
// or NULL if the archive/state/index is invalid or the archive is not in
// reading mode. All of the per-file query helpers below start here.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
// Offsets array maps the file index to the record's byte offset inside the
// in-memory central directory blob.
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  // Bit 0 of the general-purpose bit flag in the central directory header
  // marks an encrypted entry. Returns MZ_FALSE for an invalid index.
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (pHeader == NULL) return MZ_FALSE;
  return (MZ_READ_LE16(pHeader + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  // An entry is treated as a directory when its stored name ends in '/',
  // or when the DOS directory attribute bit is set.
  mz_uint name_len;
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pHeader) return MZ_FALSE;
  // First test: trailing '/' in the filename (which follows the fixed-size
  // central directory header).
  name_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if ((name_len) &&
      (pHeader[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + name_len - 1] == '/'))
    return MZ_TRUE;
  // Second test: most zip writers put DOS file attributes in the low 16 bits
  // of the external attributes, so check the DOS directory flag (0x10) and
  // ignore the creating OS recorded in the version-made-by field.
  return (MZ_READ_LE32(pHeader + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS) & 0x10)
             ? MZ_TRUE
             : MZ_FALSE;
}
// Fill *pStat with the decoded central directory record for file_index:
// version fields, flags, method, timestamp, CRC, sizes, attributes, local
// header offset, and truncated copies of the filename and comment. Returns
// MZ_FALSE for an invalid index or NULL output pointer.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
// Convert the packed DOS time/date pair into a time_t.
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
// The comment sits after the fixed header, filename, and extra field.
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  // Copy the entry's filename into pFilename (truncated to the buffer and
  // always NUL-terminated when filename_buf_size > 0). Returns the copied
  // length plus one for the terminator, or 0 for an invalid index.
  mz_uint name_len;
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (pHeader == NULL) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  name_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    // Clamp to the buffer; note the clamped length is what gets returned.
    name_len = MZ_MIN(name_len, filename_buf_size - 1);
    memcpy(pFilename, pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, name_len);
    pFilename[name_len] = '\0';
  }
  return name_len + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  // Compare two length-delimited strings; case-insensitively unless
  // MZ_ZIP_FLAG_CASE_SENSITIVE is set in flags.
  mz_uint idx;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return memcmp(pA, pB, len) == 0;
  for (idx = 0; idx < len; ++idx) {
    if (MZ_TOLOWER(pA[idx]) != MZ_TOLOWER(pB[idx])) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Three-way, case-insensitive comparison between the filename of central
// directory record l_index and a caller-supplied name pR of length r_len.
// Returns <0, 0, or >0 (strcmp-style); used by the binary search in
// mz_zip_reader_locate_file_binary_search().
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
// The stored filename immediately follows the fixed-size header.
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
// Common prefix exhausted: shorter name sorts first; otherwise compare the
// first differing lowercased bytes.
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  // Binary search over the case-insensitively sorted filename index built by
  // mz_zip_reader_sort_central_dir_offsets_by_filename(). Returns the file
  // index of a match, or -1 when the name is absent.
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pOffsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pDir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const mz_uint name_len = (mz_uint)strlen(pFilename);
  int lo = 0, hi = (int)pZip->m_total_files - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;
    int file_index = pIndices[mid];
    int cmp = mz_zip_reader_filename_compare(pDir, pOffsets, file_index,
                                             pFilename, name_len);
    if (cmp == 0) return file_index;
    if (cmp < 0)
      lo = mid + 1;
    else
      hi = mid - 1;
  }
  return -1;
}
// Find the index of an entry named pName (optionally also matching pComment).
// Uses the sorted-index binary search when possible (exact-path,
// case-insensitive, no comment filter); otherwise falls back to a linear
// scan honoring MZ_ZIP_FLAG_IGNORE_PATH and MZ_ZIP_FLAG_CASE_SENSITIVE.
// Returns -1 when not found or on invalid arguments.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
// Fast path: the sorted index only encodes default (case-insensitive,
// full-path) ordering, so it is usable only when no flags or comment apply.
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
// Zip length fields are 16-bit, so longer inputs can never match.
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
// The comment follows the filename and extra field in the record.
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
// With IGNORE_PATH, strip everything up to the last '/', '\' or ':' so
// only the basename is compared.
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
// Extract one entry into a caller-supplied output buffer, optionally using a
// caller-supplied compressed-read buffer (so no heap allocation is needed).
// Supports only stored and deflate methods, rejects encrypted/patch entries,
// and verifies the CRC32 of the decompressed data. With
// MZ_ZIP_FLAG_COMPRESSED_DATA set, the raw compressed bytes are returned
// instead and no CRC check is done.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
// Local header scratch space, mz_uint32-aligned for the LE read macros.
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
// The compressed data starts after the local header's variable-length
// filename and extra fields (which may differ from the central dir's).
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
// Streaming inflate loop: refill the read buffer as needed and decompress
// straight into the caller's buffer (non-wrapping output mode).
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
// Free the read buffer only if we allocated it ourselves above.
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  // Name-based convenience wrapper: resolve the filename to an index, then
  // extract using the caller-supplied buffers.
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (file_index < 0)
             ? MZ_FALSE
             : mz_zip_reader_extract_to_mem_no_alloc(
                   pZip, file_index, pBuf, buf_size, flags, pUser_read_buf,
                   user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  // Convenience wrapper: extract with an internally allocated read buffer
  // (no user-supplied scratch space).
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  // Convenience wrapper: extract by name with an internal read buffer.
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}
// Extract one entry into a freshly allocated heap buffer (via the archive's
// allocator). On success returns the buffer (caller frees with m_pFree) and
// stores its size in *pSize; returns NULL on any failure. With
// MZ_ZIP_FLAG_COMPRESSED_DATA the buffer holds the raw compressed bytes.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
// Refuse allocations that cannot be represented in a 32-bit size_t. (The
// MSVC variant uses a comma expression to silence a constant-conditional
// warning.)
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
// Extraction failed: release the buffer so the caller sees a clean NULL.
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  // Locate pFilename in the archive and extract it into a heap buffer
  // allocated with the archive's allocator. On failure *pSize (if provided)
  // is zeroed and NULL is returned.
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize) *pSize = 0;
    // Fix: the original returned the boolean constant MZ_FALSE from a
    // pointer-returning function; return NULL explicitly (same bit pattern,
    // correct type).
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
// Extract one entry by streaming it through a caller-supplied write callback,
// so the full uncompressed file never needs to fit in memory. Deflated data
// is inflated through a TINFL_LZ_DICT_SIZE circular window; stored data is
// copied through in read-buffer-sized chunks. The CRC32 is accumulated
// incrementally and checked at the end (unless raw compressed data was
// requested via MZ_ZIP_FLAG_COMPRESSED_DATA).
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
// Local header scratch space, mz_uint32-aligned for the LE read macros.
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
// The compressed data starts after the local header's variable-length
// filename and extra fields.
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
// In-memory archive: point straight at the compressed bytes.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
// Single callback invocation for the whole (in-memory) payload; guard
// against sizes that do not fit a 32-bit size_t.
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
// Chunked copy: read, (optionally) CRC, then hand to the callback.
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
// Deflate path: inflate into a dictionary-sized circular buffer and flush
// each produced span to the callback.
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
// Bail out if the stream inflates past the declared size.
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
// pRead_buf was allocated only for non-memory archives.
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  // Name-based convenience wrapper around the callback extractor.
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (file_index < 0)
             ? MZ_FALSE
             : mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                                 pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  // Sequential-write adapter: the destination FILE* tracks its own position,
  // so the archive offset argument is ignored.
  MZ_FILE *pDst = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pDst);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  // Extract one entry to a newly created file on disk, then (when time
  // support is compiled in) stamp the file with the entry's timestamp.
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  mz_bool succeeded;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile) return MZ_FALSE;
  succeeded = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // A failed fclose() means buffered output may never have hit the disk.
  if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (succeeded)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return succeeded;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  // Tear down a reader: free the central directory arrays, close any backing
  // file, release the internal state, and mark the archive invalid.
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // Cleanup: the guard above already guarantees m_pState is non-NULL, so the
  // original's second `if (pZip->m_pState)` test was redundant and has been
  // removed. Detach the state first so the archive is never left pointing at
  // freed memory.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  // Name-based convenience wrapper: look the entry up, then extract it to
  // the destination path.
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  return (file_index < 0)
             ? MZ_FALSE
             : mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename,
                                             flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores a 16-bit value at p in little-endian byte order, independent of
// host endianness.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores a 32-bit value at p in little-endian byte order, independent of
// host endianness.
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  int i;
  for (i = 0; i < 4; ++i) p[i] = (mz_uint8)(v >> (8 * i));
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Initializes a zip archive for writing. The caller must have already set
// m_pWrite (and optionally the allocator callbacks); existing_size is the
// number of bytes already present before the archive data (e.g. a
// self-extractor stub). Returns MZ_FALSE on bad arguments, a non-power-of-2
// m_file_offset_alignment, or allocation failure.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  // A user-specified file offset alignment must be a power of 2.
  if (pZip->m_file_offset_alignment &&
      (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  pState = (mz_zip_internal_state *)pZip->m_pAlloc(
      pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state));
  if (!pState) return MZ_FALSE;
  memset(pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  pZip->m_pState = pState;
  return MZ_TRUE;
}
// Write callback used when the archive is built in a growable heap block.
// Copies n bytes into m_pMem at file_ofs, growing the buffer geometrically
// as needed via the archive's realloc callback. Returns n on success, 0 on
// failure (zero-length write, archive larger than 2 GB on a platform with a
// 32-bit size_t, or realloc failure).
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  // The leading "0," comma operator silences an MSVC conditional-expression-
  // is-constant warning on the sizeof comparison.
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Double the capacity until the write fits: amortized O(1) growth.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size) new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Initializes a writer that builds the archive in a heap-allocated memory
// block. size_to_reserve_at_beginning bytes are reserved before the archive
// data; initial_allocation_size pre-sizes the heap block to avoid early
// reallocs. Returns MZ_FALSE on bad arguments or allocation failure.
// Fix: the original wrote through pZip (m_pWrite/m_pIO_opaque) before
// mz_zip_writer_init() performed its NULL check, so a NULL archive pointer
// crashed instead of failing cleanly.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  if (!pZip) return MZ_FALSE;
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Write callback used when the archive is written to a stdio file. Seeks to
// file_ofs only when the stream is not already positioned there, then writes
// n bytes. Returns the number of bytes written (0 on seek failure or a
// negative offset).
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  MZ_FILE *pFile = pZip->m_pState->m_pFile;
  mz_int64 cur = MZ_FTELL64(pFile);
  mz_int64 target = (mz_int64)file_ofs;
  if (target < 0) return 0;
  if ((cur != target) && (MZ_FSEEK64(pFile, target, SEEK_SET) != 0)) return 0;
  return MZ_FWRITE(pBuf, 1, n, pFile);
}
// Initializes a writer that writes the archive to the file at pFilename
// (created/truncated in binary mode). size_to_reserve_at_beginning bytes of
// zeros are written up front (e.g. for a self-extractor stub). Returns
// MZ_FALSE on bad arguments, open failure, or write failure.
// Fix: the original wrote through pZip (m_pWrite/m_pIO_opaque) before
// mz_zip_writer_init() performed its NULL check, and passed pFilename to
// MZ_FOPEN unchecked; both are now guarded up front.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  if ((!pZip) || (!pFilename)) return MZ_FALSE;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Fill the reserved region with zeros, one buffer-sized chunk at a time.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an archive opened for reading into one that can be appended to.
// New files are written starting at the current central directory offset,
// overwriting the old central directory (it is rewritten on finalize).
// pFilename is only needed when the archive is backed by a stdio file (it is
// reopened "r+b"). Returns MZ_FALSE on bad state or if the archive is
// already at the supported maximum size.
// Fix: in the MINIZ_NO_STDIO branch, the bare statement `pFilename;` was a
// no-op that triggers -Wunused-value warnings; it is now an explicit
// (void) cast.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the
  // supported max size.
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    if (!pFilename) return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif  // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Convenience wrapper around mz_zip_writer_add_mem_ex() for the common case:
// no comment, and the CRC/uncompressed size computed from pBuf.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size,
                                  NULL /* comment */, 0 /* comment_size */,
                                  level_and_flags, 0 /* uncomp_size */,
                                  0 /* uncomp_crc32 */);
}
// Bookkeeping shared between mz_zip_writer_add_* and the tdefl output
// callback while a single file's data is being written/compressed.
typedef struct {
  mz_zip_archive *m_pZip;            // destination archive
  mz_uint64 m_cur_archive_file_ofs;  // next write offset in the archive
  mz_uint64 m_comp_size;             // compressed bytes emitted so far
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
// Serializes a ZIP local file header (fixed portion only; filename/extra
// data are written separately by the caller) into pDst, little-endian.
// Always succeeds; the mz_bool return exists for call-site symmetry.
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 when deflated, 0 when stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Serializes a ZIP central directory header (fixed portion only; the
// filename/extra/comment are appended separately) into pDst, little-endian.
// Always succeeds; the mz_bool return exists for call-site symmetry.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 when deflated, 0 when stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Builds a central directory record for a newly written file and appends it
// (header + filename + extra data + comment) to the in-memory central
// directory, recording the record's starting offset in
// m_central_dir_offsets. On failure the central directory is rolled back to
// its previous size and MZ_FALSE is returned (sizes exceeding the non-zip64
// 32-bit limits also fail).
// Fix: the final push_back argument had been corrupted to the mojibake token
// "¢ral_dir_ofs" (HTML-entity damage of "&central_dir_ofs"), which does
// not compile; the address-of expression is restored.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive filename validity checks: a valid name cannot start with
// a forward slash, cannot contain a drive-letter separator ':', and cannot
// use DOS-style backslashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (p[0] == '/') return MZ_FALSE;
  for (; *p; ++p) {
    if ((*p == '\\') || (*p == ':')) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Returns how many padding bytes must be inserted so the next file's local
// header starts on an m_file_offset_alignment boundary (0 when alignment is
// disabled or the archive is already aligned). The alignment is a power of
// 2, so bit masking suffices.
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 mask, rem;
  if (!pZip->m_file_offset_alignment) return 0;
  mask = (mz_uint32)(pZip->m_file_offset_alignment - 1);
  rem = (mz_uint32)(pZip->m_archive_size & mask);
  return (pZip->m_file_offset_alignment - rem) & mask;
}
// Writes n zero bytes to the archive starting at cur_file_ofs, in chunks of
// up to 4 KB. Returns MZ_FALSE if any write comes up short.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char zeros[4096];
  memset(zeros, 0, MZ_MIN(sizeof(zeros), n));
  while (n > 0) {
    mz_uint32 chunk = MZ_MIN(sizeof(zeros), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, zeros, chunk) != chunk)
      return MZ_FALSE;
    cur_file_ofs += chunk;
    n -= chunk;
  }
  return MZ_TRUE;
}
// Adds a memory buffer to the archive as a new file. level_and_flags is a
// compression level (0..MZ_UBER_COMPRESSION) OR'd with MZ_ZIP_FLAG_* bits;
// pass MZ_ZIP_FLAG_COMPRESSED_DATA (with uncomp_size/uncomp_crc32) when pBuf
// already holds deflated data. A trailing '/' in pArchive_name creates a
// directory entry (buf must then be empty). Returns MZ_FALSE on bad
// arguments, non-zip64 size limits, I/O or allocation failure.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Store raw when level is 0 or the caller supplied pre-compressed data.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // uncomp_size is only meaningful alongside MZ_ZIP_FLAG_COMPRESSED_DATA.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  {
    // Stamp the entry with the current local time in DOS format.
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif  // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size)) return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for padding + local header; the real header is written at
  // the end once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Caller supplied raw data: compute the CRC/size here. Tiny payloads are
    // always stored (compression can't help <= 3 bytes).
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
  } else if (buf_size) {
    // Deflate pBuf through tdefl, streaming output into the archive.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that the sizes and CRC are known, back-patch the local header.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds a file from disk (pSrc_filename) to the archive under pArchive_name,
// streaming it through tdefl in MZ_ZIP_MAX_IO_BUF_SIZE chunks (or copying
// raw when level is 0). The entry's DOS timestamp comes from the source
// file's modification time. MZ_ZIP_FLAG_COMPRESSED_DATA is not supported
// here. Returns MZ_FALSE on bad arguments, non-zip64 size limits, or any
// I/O/allocation/compression failure.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input only makes sense for the in-memory variant.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file) return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Compression can't help payloads of <= 3 bytes; store them.
  if (uncomp_size <= 3) level = 0;
  // Reserve space for padding + local header; the header is back-patched at
  // the end once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Level 0: raw copy, computing the CRC as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate the file chunk by chunk through tdefl; output goes to the
      // archive via mz_zip_writer_add_put_buf_callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // TDEFL_FINISH on the last chunk flushes the stream.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Back-patch the local header now that sizes and CRC are known.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies a file from a source archive (opened for reading) into pZip
// (opened for writing) without recompressing: the local header, raw
// compressed data, and optional data descriptor are copied verbatim, and the
// source's central directory record is duplicated with an updated local
// header offset. Returns MZ_FALSE on bad state, non-zip64 size limits, or
// any read/write/allocation failure.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // mz_uint32 backing store keeps the header buffer 4-byte aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  // Read and sanity-check the source file's local header.
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Pad the destination for file offset alignment, then copy the local
  // header verbatim.
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Total payload = filename + extra data + the compressed stream.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // The buffer must hold at least 4 x uint32 so it can also receive the
  // optional data descriptor below.
  if (NULL == (pBuf = pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1,
                   (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                                  MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                         comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // The descriptor may or may not start with the optional 0x08074b50
    // signature: 4 dwords with it, 3 without.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
  // Duplicate the source central directory record, rewriting only the local
  // header offset to point into this archive.
  orig_central_dir_size = pState->m_central_dir.m_size;
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    // Roll the central directory back to its pre-call size.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}
// Writes the accumulated central directory followed by the end-of-central-
// directory record, making the archive valid, and moves the writer into the
// WRITING_HAS_BEEN_FINALIZED state. For stdio-backed archives the stream is
// flushed. Returns MZ_FALSE on bad state, non-zip64 limits, or I/O failure.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  // Flush so the finalized archive actually reaches the OS.
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}
// Finalizes a heap-backed archive and hands ownership of the finished memory
// block to the caller via *pBuf/*pSize. The caller must free the block with
// the archive's free callback (mz_free() for the defaults). Only valid for
// writers created with mz_zip_writer_init_heap().
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
  if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
  pState = pZip->m_pState;
  // Transfer ownership: detach the block so mz_zip_writer_end() won't free it.
  *pBuf = pState->m_pMem;
  *pSize = pState->m_mem_size;
  pState->m_pMem = NULL;
  pState->m_mem_size = 0;
  pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Releases all state owned by a writer archive (central directory arrays,
// the backing stdio file if any, and the heap block unless ownership was
// transferred by mz_zip_writer_finalize_heap_archive), returning the
// mz_zip_archive to the invalid state. Returns MZ_FALSE if pZip is not a
// validly opened writer.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree))
    return MZ_FALSE;
  if ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))
    return MZ_FALSE;
  // Detach the state first so the archive never points at freed memory.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// One-shot helper: appends a memory buffer as a new file to the archive at
// pZip_filename, creating the archive if it doesn't exist or appending
// in-place if it does. The archive is always finalized (even after a failed
// add) so an existing archive is left with a valid central directory; a
// newly created archive is deleted on failure. Returns MZ_TRUE only if the
// add, finalize, and end all succeed.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  // stat() determines whether we create a fresh archive or append.
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}
// One-shot convenience: opens the ZIP file pZip_filename, locates the entry
// pArchive_name, and extracts it into a freshly allocated heap buffer.
// On success returns the buffer (caller frees with the archive allocator /
// mz_free) and stores its size in *pSize; returns NULL on any failure,
// leaving *pSize == 0.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint flags) {
  int file_index;
  mz_zip_archive zip_archive;
  void *p = NULL;
  if (pSize) *pSize = 0;
  if ((!pZip_filename) || (!pArchive_name)) return NULL;
  MZ_CLEAR_OBJ(zip_archive);
  if (!mz_zip_reader_init_file(
          &zip_archive, pZip_filename,
          flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;
  if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
                                              flags)) >= 0)
    p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
  // Reader teardown is safe whether or not extraction succeeded.
  mz_zip_reader_end(&zip_archive);
  return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static const int kEXRVersionSize = 8;  // EXR file prologue: 4-byte magic + 4-byte version/flags.
// Byte-swaps a 16-bit value in place on big-endian hosts; no-op on
// little-endian hosts (EXR files are little-endian on disk).
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[1];
  dst[1] = src[0];
#endif
}
// Byte-swaps a 32-bit value in place on big-endian hosts; no-op on
// little-endian hosts (EXR files are little-endian on disk).
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
// Byte-swaps a 64-bit value in place on big-endian hosts; no-op on
// little-endian hosts (EXR files are little-endian on disk).
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// IEEE-754 single-precision value viewed either as a raw 32-bit pattern (u),
// a float (f), or its sign/exponent/mantissa fields (s). NOTE(review):
// type-punning through a union is well-defined in C but technically UB in
// standard C++; it is the de-facto supported idiom on the targeted compilers.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// IEEE-754 half-precision value viewed as a raw 16-bit pattern (u) or as its
// sign(1)/exponent(5)/mantissa(10) fields (s). Companion of FP32 above.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE half to a single-precision float via bit manipulation
// (branch-light technique from https://gist.github.com/rygorous/2156668).
// Handles zero, denormals, Inf and NaN.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust
  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    // Adding `magic` and subtracting it as a float renormalizes the
    // half-denormal into a proper single-precision value.
    o.f -= magic.f;  // renormalize
  }
  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Converts a single-precision float to an IEEE half with round-to-nearest.
// Full-range version: handles zero, denormals, overflow to infinity, and
// preserves NaN as quiet NaN. Based on ISPC reference code.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`.
// On success stores the string (without the terminator) in *s and returns a
// pointer just past the '\0'. If no terminator is found within `len` bytes,
// clears *s and returns NULL.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t i = 0;
  // Scan forward for the terminating NUL, never looking past `len` bytes.
  while (i < len && ptr[i] != 0) {
    i++;
  }
  if (i >= len) {
    // Unterminated: signal failure with an empty string.
    (*s) = std::string();
    return NULL;
  }
  (*s) = std::string(ptr, ptr + i);
  return ptr + i + 1;  // skip '\0'
}
// Parses one EXR attribute record from `marker` (at most `size` bytes).
// Record layout: name '\0' type '\0' uint32 length, then `length` payload
// bytes. On success fills *name, *type and *data, stores the total number of
// bytes consumed in *marker_size, and returns true. Returns false on any
// truncation or on a zero-length payload.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);
  marker += name_len + 1;
  size -= name_len + 1;
  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);
  marker += type_len + 1;
  size -= type_len + 1;
  // Payload length is stored little-endian; swap4 is a no-op on LE hosts.
  if (size < sizeof(uint32_t)) {
    return false;
  }
  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
  if (data_len == 0) {
    // Zero-length attributes are rejected.
    return false;
  }
  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);
  // Bounds-check the payload before copying it out.
  if (size < data_len) {
    return false;
  }
  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Appends one serialized EXR attribute to *out.
// Layout: name '\0' type '\0' int32 length (little-endian on disk), payload.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  size_t name_bytes = strlen(name) + 1;  // include the terminating NUL
  size_t type_bytes = strlen(type) + 1;
  out->insert(out->end(), name, name + name_bytes);
  out->insert(out->end(), type, type + type_bytes);
  int out_len = len;
  // swap4 is a no-op on little-endian hosts; byte-swaps on big-endian ones.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&out_len));
  const unsigned char *len_bytes =
      reinterpret_cast<const unsigned char *>(&out_len);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));
  out->insert(out->end(), data, data + len);
}
// Parsed form of one entry in an EXR "channels" attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;    // UINT / HALF / FLOAT discriminator as stored in the file
  int x_sampling;    // horizontal subsampling factor
  int y_sampling;    // vertical subsampling factor
  unsigned char p_linear;  // pLinear hint byte from the file
  unsigned char pad[3];    // keeps the struct 4-byte aligned; not serialized
} ChannelInfo;
// Aggregated, decoded EXR header: channel list, raw attributes, windows,
// compression and (optional) tile parameters. clear() resets every field to
// its zero state so one instance can be reused across headers.
typedef struct {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;
  int data_window[4];     // xmin, ymin, xmax, ymax
  int line_order;
  int display_window[4];  // xmin, ymin, xmax, ymax
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;
  int chunk_count;
  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  unsigned int header_len;  // total header size in bytes, filled by the parser
  int compression_type;
  // Resets all fields to zero / empty.
  void clear() {
    channels.clear();
    attributes.clear();
    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;
    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;
    header_len = 0;
    compression_type = 0;
  }
} HeaderInfo;
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(
&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
// Serializes a channel list into the wire format read by ReadChannelInfo:
// per channel "name\0 pixel_type(4) pLinear(1) reserved(3) x_sampling(4)
// y_sampling(4)", followed by a single terminating '\0'. The three reserved
// bytes stay zero because resize() value-initializes the buffer.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);  // +1 for the trailing list terminator
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;
    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    // Swap to little-endian on big-endian hosts (no-op otherwise).
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    p += 4;  // 1 byte pLinear + 3 reserved (already zeroed) bytes
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';
}
// ZIP-compresses one EXR chunk: applies OpenEXR's interleave + delta
// preprocessing, then deflates with miniz (or zlib). Writes the result to
// `dst` and its size to `compressedSize`. If compression does not shrink the
// data, the raw input is stored instead (Issue 40).
// NOTE(review): `dst` must hold at least compressBound(src_size) bytes, and a
// failing (de)compressor is only caught by assert() — TODO confirm callers
// size dst accordingly.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data: split bytes alternately into the two halves of
  // tmpBuf (improves deflate ratio on pixel data).
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: replace each byte with a biased delta from its predecessor.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  compressedSize = outSize;
#endif
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inflates one ZIP-compressed EXR chunk into `dst` and undoes OpenEXR's
// delta + interleave preprocessing. *uncompressed_size is in/out: expected
// size on entry, actual inflated size on return. When src_size equals the
// expected size the chunk was stored raw (Issue 40) and is copied verbatim.
// Returns false if inflation fails.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: invert the biased delta applied by CompressZip.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave bytes from the two halves.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;    // shortest run worth encoding as a repeat
const int MAX_RUN_LENGTH = 127;  // longest run representable in one count byte
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
// Run-length encodes `inLength` bytes of `in` into `out` and returns the
// encoded size. Repeat runs are emitted as a positive count byte (length-1)
// followed by the value; literal runs as a negative count byte followed by
// the raw bytes. Worst case output is ~3/2 the input size.
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;
  while (runStart < inEnd) {
    // Extend the run while bytes repeat and the count byte can express it.
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }
    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressable run
      //
      // NOTE(review): the cast is applied before the -1 (OpenEXR subtracts
      // first); for a 128-byte run both forms yield 127 on two's-complement
      // targets — confirm if porting to exotic platforms.
      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run: extend until the next 3-byte repeat would start.
      //
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }
      *outWrite++ = static_cast<char>(runStart - runEnd);  // negative count
      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }
    ++runEnd;
  }
  return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
// Uncompresses an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the uncompressed
// data would exceed maxLength OR if the input is truncated/malformed.
//
// Fix: the original trusted the embedded count bytes and could read past
// the end of `in` on crafted input (e.g. a literal-run header whose count
// exceeds the remaining input). Both decode paths now validate `inLength`
// before touching the input.
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;
  while (inLength > 0) {
    if (*in < 0) {
      // Literal run: the next `count` input bytes are copied verbatim.
      int count = -(static_cast<int>(*in++));
      // Reject truncated input before reading `count` bytes.
      if (inLength < count + 1) return 0;
      inLength -= count + 1;
      if (0 > (maxLength -= count)) return 0;
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeat run: one value byte replicated `count + 1` times.
      if (inLength < 2) return 0;  // the value byte must be present
      int count = *in++;
      inLength -= 2;
      if (0 > (maxLength -= count + 1)) return 0;
      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }
  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// RLE-compresses one EXR chunk: applies OpenEXR's interleave + delta
// preprocessing, then run-length encodes. Writes the result to `dst` and its
// size to `compressedSize`. If compression does not shrink the data, the raw
// input is stored instead (Issue 40).
// NOTE(review): `dst` must hold up to (src_size * 3) / 2 bytes — the RLE
// worst case — TODO confirm callers size dst accordingly.
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  //
  // Reorder the pixel data: split bytes alternately into the two halves.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: replace each byte with a biased delta from its predecessor.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);
  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// RLE-decompresses one EXR chunk into `dst` and undoes OpenEXR's delta +
// interleave preprocessing. When src_size equals uncompressed_size the chunk
// was stored raw (Issue 40) and is copied verbatim.
// NOTE(review): a malformed chunk is only caught by assert(); in release
// builds decoding continues with partial data — the void return type cannot
// report the failure. Consider surfacing an error in a future API revision.
static void DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return;
  }
  std::vector<unsigned char> tmpBuf(uncompressed_size);
  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  assert(ret == static_cast<int>(uncompressed_size));
  (void)ret;
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  // Predictor: invert the biased delta applied by CompressRle.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave bytes from the two halves.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping used by the PIZ codec while walking a chunk.
struct PIZChannelData {
  unsigned short *start;  // first sample of this channel in the work buffer
  unsigned short *end;    // write/read cursor, advanced row by row
  int nx;                 // samples per row
  int ny;                 // number of rows
  int ys;                 // y sampling factor
  int size;               // samples per pixel (2 for FLOAT stored as 2 halves)
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// Forward 14-bit Haar step: l receives the (floor) average of a and b,
// h their signed difference, both re-expressed as unsigned shorts.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short sa = static_cast<short>(a);
  short sb = static_cast<short>(b);
  l = static_cast<unsigned short>(static_cast<short>((sa + sb) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(sa - sb));
}
// Inverse of wenc14: rebuilds the original pair (a, b) from the stored
// average l and difference h.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int delta = static_cast<short>(h);
  // The (delta & 1) term undoes the floor taken by the encoder's >> 1.
  int first = static_cast<short>(l) + (delta & 1) + (delta >> 1);
  a = static_cast<unsigned short>(static_cast<short>(first));
  b = static_cast<unsigned short>(static_cast<short>(first - delta));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;                 // full sample width for the 16-bit basis
const int A_OFFSET = 1 << (NBITS - 1);  // bias applied to `a` before averaging
const int M_OFFSET = 1 << (NBITS - 1);  // bias re-applied when the diff is negative
const int MOD_MASK = (1 << NBITS) - 1;  // modulo-2^16 wraparound mask
// Forward Haar step with modulo-2^16 arithmetic; handles full 16-bit data
// (at a small cost in downstream Huffman compressibility).
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int biased = (a + A_OFFSET) & MOD_MASK;
  int avg = (biased + b) >> 1;
  int diff = biased - b;
  if (diff < 0) {
    avg = (avg + M_OFFSET) & MOD_MASK;
  }
  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff & MOD_MASK);
}
// Inverse of wenc16: rebuilds (a, b) from the modulo average l and
// difference h.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int avg = l;
  int diff = h;
  int second = (avg - (diff >> 1)) & MOD_MASK;
  int first = (diff + second - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(second);
  a = static_cast<unsigned short>(first);
}
//
// 2D Wavelet encoding:
//
// Forward 2D Haar wavelet transform, in place over a strided 2D array.
// Chooses the exact 14-bit basis (wenc14) when every value fits in 14 bits,
// otherwise the modulo 16-bit basis (wenc16). `ox`/`oy` are element strides.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding: transform rows, then columns, of each
        // 2x2 cell at the current level.
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse 2D Haar wavelet transform, in place; exact inverse of wav2Encode.
// Must be called with the same `mx` basis selector used when encoding.
// Levels are processed coarsest-first, mirroring the encoder in reverse.
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding: invert columns, then rows, of each
        // 2x2 cell at the current level.
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size (+1 for the RLE pseudo-symbol)
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;         // mask extracting the table index
// One slot of the fast decoding table. Short codes (<= HUF_DECBITS bits)
// store the literal directly; longer codes store a pointer to a candidate
// list that the decoder searches.
struct HufDec {  // short code long code
  //-------------------------------
  int len : 8;   // code length 0
  int lit : 24;  // lit p size
  int *p;        // 0 lits
};
// A packed table entry stores the bit-length of the code in its low 6 bits.
inline long long hufLength(long long code) { return code & 0x3f; }
// The bits above the 6-bit length field hold the canonical code value.
inline long long hufCode(long long code) {
  long long packed = code;
  return packed >> 6;
}
// Appends the low nBits of `bits` to the bit accumulator (c = pending bits,
// lc = how many are pending) and flushes complete bytes to *out, MSB first.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;
  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Reads the next nBits from the bit stream, refilling the accumulator
// (c = buffered bits, lc = count) one byte at a time from *in, MSB first.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | static_cast<unsigned char>(*in++);
    lc += 8;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Converts a table of code lengths into a canonical Huffman code table:
// on entry hcode[i] holds the bit-length of symbol i's code (0 = unused);
// on exit hcode[i] packs that length (low 6 bits) with the assigned
// canonical code value (upper bits), matching hufLength()/hufCode().
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);
    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Heap predicate for std::make_heap/pop_heap/push_heap over frequency
// pointers: operator> makes the standard max-heap behave as a min-heap, so
// the SMALLEST frequency stays on top.
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //     frq[im] != 0, and frq[i] == 0 for all i < im
  //     frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  int hlink[HUF_ENCSIZE];
  long long *fHeap[HUF_ENCSIZE];

  *im = 0;

  // Skip the run of leading zero-frequency symbols to find the first
  // symbol actually present in the data.
  while (!frq[*im]) (*im)++;

  int nf = 0;  // number of non-zero-frequency symbols (heap size)

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  long long scode[HUF_ENCSIZE];
  memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    // Pointer difference recovers the symbol index of the heap top.
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);  // max code length supported by the format

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode);
  memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Packed-table run-length escape values (see table above): codes 59-62
// directly encode short zero runs of 2-5; code 63 is followed by an
// 8-bit count for runs of SHORTEST_LONG_RUN..LONGEST_LONG_RUN zeroes.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;  // bit buffer (bits not yet flushed to p)
  int lc = 0;       // number of valid bits in c

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Measure this run of zero-length codes, capped so the run fits in
      // the 8-bit count of a long-run record.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // 63 marker followed by the extra run length in 8 bits.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          // Runs of 2..5 zeroes map directly onto codes 59..62.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    // Ordinary entry: the 6-bit code length itself.
    outputBits(6, l, c, lc, p);
  }

  // Flush any remaining bits, left-aligned in the final byte.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;  // bit buffer
  int lc = 0;       // number of valid bits in c

  for (; im <= iM; im++) {
    // Reading past the packed table means truncated/corrupt input.
    if (p - *pcode > ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Long zero run: an 8-bit extra count follows the 63 marker.
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      // Run must not extend past the declared table range.
      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: codes 59..62 encode runs of 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  // Replace the recovered code lengths with canonical (code, length) pairs.
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  // Reset every slot: no short code (len), no literal, and no secondary
  // long-code list. Explicit member stores are used rather than memset so
  // the pointer member is a proper null pointer on every platform.
  HufDec *end = hdecod + HUF_DECSIZE;
  for (HufDec *d = hdecod; d != end; ++d) {
    d->len = 0;
    d->lit = 0;
    d->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      // The slot is addressed by the top HUF_DECBITS bits of the code;
      // all long codes sharing that prefix land in the same slot's list.
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the secondary symbol list by one element (simple
        // copy-reallocate; long codes are rare, so this is acceptable).
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      // Every table index whose top l bits equal c decodes to this
      // symbol, so fill all 2^(HUF_DECBITS - l) such slots.
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  // Only long-code slots own heap memory (the `p` arrays allocated by
  // hufBuildDecTable). Release them and null the pointers so a second
  // call is harmless.
  for (int slot = 0; slot < HUF_DECSIZE; slot++) {
    if (hdecod[slot].p == NULL) continue;
    delete[] hdecod[slot].p;
    hdecod[slot].p = 0;
  }
}
//
// ENCODING
//
// Append one Huffman code word — hufLength(code) bits of hufCode(code) —
// to the output bit stream whose pending bits live in (c, lc).
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output runCount + 1 occurrences of the symbol whose code is sCode:
  // either each symbol explicitly, or — when that is shorter — the sCode
  // symbol once, followed by the runCode escape symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // `>= 0` is intentional: runCount counts repeats beyond the first
    // occurrence, so runCount + 1 symbols are written in total.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  // NOTE(review): despite the "(in bytes)" label, `ni` is used below as
  // the number of unsigned-short elements in `in` — confirm with callers.
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)
  int s = in[0];    // current run symbol
  int cs = 0;       // repeats of s beyond its first occurrence (max 255)

  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush the partial final byte, left-aligned. `out` is deliberately not
  // advanced: the bit total below accounts for the tail via lc.
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// Pull the next input byte into the low end of bit buffer `c`, growing
// the valid-bit count `lc` by 8. `in` is advanced one byte.
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }

// Emit one decoded symbol `po` into `out` (bounded by `oe`). If `po` is
// the run-length escape code `rlc`, read an 8-bit repeat count from the
// bit stream and replicate the previously written symbol out[-1] that
// many times. Expands to a `return false` on output overflow, so this
// macro may only be used inside a function returning bool.
#define getCode(po, rlc, c, lc, in, out, oe) \
  {                                          \
    if (po == rlc) {                         \
      if (lc < 8) getChar(c, lc, in);        \
                                             \
      lc -= 8;                               \
                                             \
      unsigned char cs = (c >> lc);          \
                                             \
      if (out + cs > oe) return false;       \
                                             \
      unsigned short s = out[-1];            \
                                             \
      while (cs-- > 0) *out++ = s;           \
    } else if (out < oe) {                   \
      *out++ = po;                           \
    } else {                                 \
      return false;                          \
    }                                        \
  }
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in bytes)
                      unsigned short *out)  //  o: uncompressed output buffer
{
  // NOTE(review): despite the "(in bytes)" label, `no` is compared below
  // against the number of decoded unsigned-short elements — confirm.
  long long c = 0;  // bit buffer
  int lc = 0;       // number of valid bits in c
  unsigned short *outb = out;
  unsigned short *oe = out + no;
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      // The top HUF_DECBITS pending bits index the primary table.
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        getCode(pl.lit, rlc, c, lc, in, out, oe);
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //
        // pl.lit is the number of long codes sharing this prefix;
        // pl.p lists their symbol indices.
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              getCode(pl.p[j], rlc, c, lc, in, out, oe);
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes
  //
  // The final byte may contain fewer than 8 significant bits; drop the
  // padding bits before decoding the tail.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      getCode(pl.lit, rlc, c, lc, in, out, oe);
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}
// Build a histogram of the 16-bit symbols in data[0..n): freq[s] receives
// the number of occurrences of symbol s.
static void countFrequencies(long long freq[HUF_ENCSIZE],
                             const unsigned short data[/*n*/], int n) {
  std::fill(freq, freq + HUF_ENCSIZE, 0LL);
  for (int k = 0; k < n; ++k) freq[data[k]] += 1;
}
// Serialize a 32-bit value into buf in little-endian byte order,
// independent of the host machine's endianness.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;
  for (int k = 0; k < 4; ++k) {
    b[k] = (unsigned char)(i & 0xffu);
    i >>= 8;
  }
}
// Deserialize a little-endian 32-bit value from buf.
//
// Each byte is widened to unsigned int *before* shifting: the original
// expression `(b[3] << 24) & 0xff000000` left-shifted a promoted
// (signed) int, which is undefined behavior whenever the top bit of
// b[3] is set. Shifting unsigned operands is well defined and yields
// the same value, so callers see no behavioral change on valid input.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
// Huffman-compress nRaw 16-bit samples into `compressed`.
// Output layout: a 20-byte header of five little-endian 32-bit words
// [im][iM][tableLength][nBits][reserved], then the packed code-length
// table, then the Huffman bit stream. Returns the total byte count
// written (0 when there is no input).
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  long long freq[HUF_ENCSIZE];
  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq, &im, &iM);

  // The header is filled in last, once table and data sizes are known.
  char *packedTable = compressed + 20;
  char *packedTableEnd = packedTable;
  hufPackEncTable(freq, im, iM, &packedTableEnd);

  char *dataStart = packedTableEnd;
  int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
  int nDataBytes = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, (unsigned int)(packedTableEnd - packedTable));
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return (int)(dataStart + nDataBytes - compressed);
}
// Huffman-decompress `nCompressed` bytes into nRaw 16-bit samples.
// Input layout matches hufCompress: 20-byte header, packed code-length
// table, bit stream. Returns false on malformed or truncated input.
//
// BUG FIX: the results of hufUnpackEncTable, hufBuildDecTable and
// hufDecode were previously discarded, so corrupt data silently yielded
// garbage output; they are now propagated. A `nCompressed < 20` check
// also guards the header reads.
static bool hufUncompress(const char compressed[], int nCompressed,
                          unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    // Empty input is valid only when no output is expected.
    return nRaw == 0;
  }

  if (nCompressed < 20) {
    return false;  // truncated header
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //
  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));

    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }

    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;  // bit stream longer than remaining input
    }

    // hufBuildDecTable may have allocated secondary entries before
    // failing, so the table is freed on every path.
    bool ok = hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
    if (ok) {
      ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw);
    }
    hufFreeDecTable(&hdec.at(0));
    return ok;
  }
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

// Build a presence bitmap for the 16-bit samples in data[0..nData): bit
// (v & 7) of byte (v >> 3) is set iff value v occurs. Bit 0 (value zero)
// is always cleared because the codec treats zero as implicitly present.
// minNonZero/maxNonZero receive the first and last non-zero bitmap byte
// indices; when the bitmap is empty, minNonZero ends up at
// BITMAP_SIZE - 1 and maxNonZero at 0 (i.e. min > max).
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  memset(bitmap, 0, BITMAP_SIZE);

  for (int k = 0; k < nData; ++k) {
    unsigned short v = data[k];
    bitmap[v >> 3] = (unsigned char)(bitmap[v >> 3] | (1u << (v & 7)));
  }

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int k = 0; k < BITMAP_SIZE; ++k) {
    if (!bitmap[k]) continue;
    if (k < minNonZero) minNonZero = (unsigned short)k;
    if (k > maxNonZero) maxNonZero = (unsigned short)k;
  }
}
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  // Dense forward LUT: every value present in the bitmap (plus the
  // implicit value 0) is assigned the next consecutive code; values that
  // are absent map to 0.
  int next = 0;

  for (int v = 0; v < USHORT_RANGE; ++v) {
    bool present = (v == 0) || ((bitmap[v >> 3] >> (v & 7)) & 1);
    lut[v] = present ? (unsigned short)(next++) : 0;
  }

  return (unsigned short)(next - 1);  // maximum value stored in lut[],
}  // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  // Inverse of forwardLutFromBitmap: lut[code] = original value, for each
  // value present in the bitmap (value 0 is implicitly present). Unused
  // trailing slots are zero-filled.
  int count = 0;

  for (int v = 0; v < USHORT_RANGE; ++v) {
    if ((v == 0) || ((bitmap[v >> 3] >> (v & 7)) & 1))
      lut[count++] = (unsigned short)v;
  }

  int maxIndex = count - 1;
  while (count < USHORT_RANGE) lut[count++] = 0;

  return (unsigned short)maxIndex;  // maximum k where lut[k] is non-zero,
}  // i.e. number of ones in bitmap minus 1
// Remap every sample in data[0..nData) in place through the lookup table.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (unsigned short *p = data, *e = data + nData; p != e; ++p) *p = lut[*p];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  // PIZ pipeline: split the interleaved scanlines into per-channel
  // planes, range-compress through a bitmap/LUT, wavelet-encode each
  // plane, then Huffman-encode the whole buffer. Falls back to storing
  // the input verbatim when compression would grow the data (Issue 40).
  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Carve tmpBuffer into one contiguous plane per channel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;

    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    // Channel width measured in 16-bit units: 1 for HALF, 2 otherwise.
    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  // De-interleave the scanline-ordered input into the channel planes.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //    continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap,
                 minNonZero, maxNonZero);

  unsigned short lut[USHORT_RANGE];
  unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
  applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));

  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);

  if (minNonZero <= maxNonZero) {
    // Only the non-zero span of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }

  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);

  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));

  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
// Inverse of CompressPiz: read the bitmap header, Huffman-decode,
// wavelet-decode each channel plane, expand values through the reverse
// LUT, and re-interleave the planes into scanline order in outPtr.
// Returns false on malformed or truncated input.
//
// BUG FIX: the hufUncompress result was previously ignored, so corrupt
// input silently produced garbage pixels. Bounds checks on the header,
// bitmap span and Huffman length fields against inLen were also added.
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }

  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  if (inLen < 4) {
    return false;  // missing min/max non-zero header
  }

  memset(bitmap, 0, BITMAP_SIZE);

  const unsigned char *ptr = inPtr;
  minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;

  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }

  if (minNonZero <= maxNonZero) {
    size_t bitmapLen = static_cast<size_t>(maxNonZero - minNonZero + 1);
    if (static_cast<size_t>(ptr - inPtr) + bitmapLen > inLen) {
      return false;  // bitmap span exceeds the input buffer
    }
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, bitmapLen);
    ptr += bitmapLen;
  }

  unsigned short lut[USHORT_RANGE];
  memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);

  //
  // Huffman decoding
  //
  if (static_cast<size_t>(ptr - inPtr) + sizeof(int) > inLen) {
    return false;  // length field is missing
  }
  int length;
  length = *(reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);

  if (length < 0 ||
      static_cast<size_t>(ptr - inPtr) + static_cast<size_t>(length) > inLen) {
    return false;  // declared Huffman payload exceeds the input buffer
  }

  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer.at(0), static_cast<int>(tmpBufSize))) {
    return false;
  }

  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));

  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Carve tmpBuffer into one contiguous plane per channel.
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Expand the pixel data to their original range
  //
  applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize));

  // Re-interleave the per-channel planes into scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //    continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters for ZFP compression; exactly one of rate / precision /
// tolerance is meaningful depending on `type`.
struct ZFPCompressionParam {
  double rate;
  int precision;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*

  // Defaults: fixed-rate mode at 2 bits per value.
  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
// Scan the EXR attribute list for ZFP compression settings. The
// "zfpCompressionType" attribute selects the mode; the mode-specific
// attribute (rate / precision / tolerance) supplies its parameter.
// Returns true only when both the type and its parameter were found.
//
// BUG FIX: the PRECISION branch previously stored its value into
// param->rate instead of param->precision. The raw attribute bytes are
// now also read with memcpy instead of a reinterpret_cast'ed load,
// which avoided no work but risked a misaligned access (UB on some
// targets).
bool FindZFPCompressionParam(ZFPCompressionParam *param,
                             const EXRAttribute *attributes,
                             int num_attributes) {
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }

  if (!foundType) {
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        memcpy(&param->rate, attributes[i].value, sizeof(double));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        memcpy(&param->precision, attributes[i].value, sizeof(int));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        memcpy(&param->tolerance, attributes[i].value, sizeof(double));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size = dst_width * dst_num_lines * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
assert(ret);
(void)ret;
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
&dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len));
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr +
c * static_cast<size_t>(width) * sizeof(unsigned short));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(float));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int));
unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
return true;
}
// Decode the pixel data of one tile. Edge tiles may be smaller than the
// nominal tile size, so the tile's actual width/height are computed first
// (clipped against the data window) and returned through *width / *height.
// The tile is then decoded as a tiny standalone image via DecodePixelData.
static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Pixels remaining between this tile's origin and the data window edge.
  const int remaining_w = data_width - tile_offset_x * tile_size_x;
  const int remaining_h = data_height - tile_offset_y * tile_size_y;

  // Clip: interior tiles use the full tile size, edge tiles the remainder.
  (*width) = (remaining_w < tile_size_x) ? remaining_w : tile_size_x;
  (*height) = (remaining_h < tile_size_y) ? remaining_h : tile_size_y;

  // Image size = tile size.
  DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                  compression_type, line_order, (*width), tile_size_y,
                  /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                  (*height), pixel_data_size, num_attributes, attributes,
                  num_channels, channels, channel_offset_list);
}
// Compute the byte layout of one interleaved pixel: for each channel,
// record its byte offset within the pixel (channel_offset_list) and
// accumulate the total per-pixel byte size (pixel_data_size).
// channel_offset ends up equal to pixel_data_size; unknown pixel types
// assert and contribute zero bytes.
static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));
  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);

    size_t component_size = 0;
    switch (channels[c].pixel_type) {
      case TINYEXR_PIXELTYPE_HALF:
        component_size = sizeof(unsigned short);
        break;
      case TINYEXR_PIXELTYPE_FLOAT:
        component_size = sizeof(float);
        break;
      case TINYEXR_PIXELTYPE_UINT:
        component_size = sizeof(unsigned int);
        break;
      default:
        assert(0);  // unknown pixel type
        break;
    }

    (*pixel_data_size) += static_cast<int>(component_size);
    (*channel_offset) += component_size;
  }
}
// Allocate one internal image buffer per channel for a data_width x
// data_height region. HALF channels are stored either as raw half
// (unsigned short) or widened to float depending on the requested pixel
// type; FLOAT and UINT channels keep their native size.
// Returns a malloc'ed array of per-channel buffers; the caller owns both
// the outer array and each buffer.
//
// Fix: on an invalid/unknown pixel type the original left `images[c]`
// uninitialized in NDEBUG builds (assert compiles away), yielding an
// indeterminate pointer. We now explicitly set it to NULL.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF will be widened to FLOAT at decode time.
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
        images[c] = NULL;  // avoid indeterminate pointer in release builds
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
      images[c] = NULL;  // avoid indeterminate pointer in release builds
    }
  }

  return images;
}
// Parse one EXR header from `buf`/`size` into `info`.
// For multipart files an empty header (leading '\0') marks the end of the
// header list and sets *empty_header. Returns a TINYEXR_* status code and,
// on failure, an explanatory message in *err (when non-NULL).
//
// Hardening fixes versus the original:
//  - "tiles" attribute: size was only checked with assert(); a short
//    attribute caused OOB reads in release builds. Now validated.
//  - "compression" attribute: data[0] was read without checking that the
//    attribute payload is non-empty.
//  - custom attributes: memcpy from &data.at(0) threw std::out_of_range
//    for zero-length attribute values. Now NULL value / size 0 is stored.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;

  info->data_window[0] = 0;
  info->data_window[1] = 0;
  info->data_window[2] = 0;
  info->data_window[3] = 0;
  info->line_order = 0;  // @fixme
  info->display_window[0] = 0;
  info->display_window[1] = 0;
  info->display_window[2] = 0;
  info->display_window[3] = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes until the '\0' header terminator.
  size_t orig_size = size;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (version->tiled && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      assert(data.size() == 9);
      if (data.size() != 9) {
        // Malformed "tiles" attribute: reject instead of reading OOB.
        if (err) {
          (*err) = "Invalid tiles attribute.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;

    } else if (attr_name.compare("compression") == 0) {
      if (data.empty()) {
        // Compression attribute must carry at least 1 byte.
        if (err) {
          (*err) = "Invalid compression attribute.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) = "Failed to parse channel info.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) = "# of channels is zero.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window[0], &data.at(0), sizeof(int));
        memcpy(&info->data_window[1], &data.at(4), sizeof(int));
        memcpy(&info->data_window[2], &data.at(8), sizeof(int));
        memcpy(&info->data_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window[0], &data.at(0), sizeof(int));
        memcpy(&info->display_window[1], &data.at(4), sizeof(int));
        memcpy(&info->display_window[2], &data.at(8), sizeof(int));
        memcpy(&info->display_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[1]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[2]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[3]));
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_width));
        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        if (data.size() > 0) {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        } else {
          // Zero-length attribute value: data.at(0) would throw, so store
          // a NULL value pointer instead.
          attrib.value = NULL;
        }
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
// Per-channel tables (channels / pixel_types / requested_pixel_types) are
// malloc'ed here and filled in a single pass; custom attribute value
// buffers are shared by pointer rather than duplicated.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;

  for (int k = 0; k < 4; k++) {
    exr_header->display_window[k] = info.display_window[k];
    exr_header->data_window[k] = info.data_window[k];
  }

  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;

  const size_t num_chan = info.channels.size();
  exr_header->num_channels = static_cast<int>(num_chan);

  exr_header->channels =
      static_cast<EXRChannelInfo *>(malloc(sizeof(EXRChannelInfo) * num_chan));
  exr_header->pixel_types = static_cast<int *>(malloc(sizeof(int) * num_chan));
  exr_header->requested_pixel_types =
      static_cast<int *>(malloc(sizeof(int) * num_chan));

  for (size_t c = 0; c < num_chan; c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;

    exr_header->pixel_types[c] = info.channels[c].pixel_type;
    // Requested type initially mirrors the stored type; the caller may
    // overwrite it before decoding.
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  assert(info.attributes.size() < TINYEXR_MAX_ATTRIBUTES);
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  for (size_t i = 0; i < info.attributes.size(); i++) {
    memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256);
    memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256);
    exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy pointer; the value buffer's ownership is shared.
    exr_header->custom_attributes[i].value = info.attributes[i].value;
  }

  exr_header->header_len = info.header_len;
}
// Decode all pixel-data chunks of one EXR part into `exr_image`.
// `offsets` holds one file offset (relative to `head`) per chunk; `size`
// bounds every read. Handles both tiled and scanline layouts, allocating
// the per-channel destination buffers via AllocateImage. Returns a
// TINYEXR_* status code.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size) {
  int num_channels = exr_header->num_channels;

  // Number of scanlines per chunk depends on the compression scheme
  // (1 for NONE/RLE/ZIPS, 16 for ZIP/ZFP, 32 for PIZ).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // Data window is inclusive on both ends, hence the +1.
  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                &channel_offset, num_channels,
                                exr_header->channels);

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        malloc(sizeof(EXRTile) * static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels, exr_header->requested_pixel_types,
          data_width, data_height);

      // Tile chunk layout:
      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      if (offsets[tile_idx] + sizeof(int) * 5 > size) {
        return TINYEXR_ERROR_INVALID_DATA;
      }
      size_t data_size = size - (offsets[tile_idx] + sizeof(int) * 5);
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);

      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

      // @todo{ LoD }
      // Only LoD level (0, 0) is supported; reject mip/rip levels.
      if (tile_coordinates[2] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }
      if (tile_coordinates[3] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }

      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

      if (data_len < 4 || size_t(data_len) > data_size) {
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;

      tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height, tile_coordinates[0],
          tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
          static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);

      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

      exr_image->num_tiles = static_cast<int>(num_tiles);
    }
  } else {  // scanline format

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // Scanline chunk layout:
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size - (offsets[y_idx] + sizeof(int) * 2);
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else {
          // Clamp the chunk's scanline range to the data window bottom.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window[3] + 1));

          int num_lines = end_line_no - line_no;
          //assert(num_lines > 0);
          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y
            line_no -= exr_header->data_window[1];

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types, data_ptr,
                      static_cast<size_t>(data_len), exr_header->compression_type,
                      exr_header->line_order, data_width, data_height, data_width, y,
                      line_no, num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels), exr_header->channels,
                      channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }
    }  // omp parallel
  }

  if (invalid_data) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}
// Rebuild the scanline offset table for files whose stored table is
// zero-filled or corrupt, by walking the chunk stream: every chunk begins
// with 4 bytes (scanline y) + 4 bytes (payload length) followed by the
// payload. Fills `offsets` (expected to be pre-sized to `n`) with offsets
// relative to `head`. Returns false when a chunk header or length would
// run past `size`.
//
// Fix: `y` and `data_len` are now byte-swapped BEFORE the bounds check.
// The original validated the raw (unswapped) value, so on big-endian
// hosts it compared garbage and could then advance `marker` by a bogus
// length.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // Swap to host byte order before validating the length.
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    if (data_len >= size) {
      return false;
    }

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}
// Decodes an EXR image from `memory` (`head` = start of file data, `marker`
// = start of the chunk offset table). Validates the data window, reads (or
// reconstructs) the scanline/tile offset table, then delegates pixel
// decoding to DecodeChunk. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*
// code; on failure `*err` (if non-NULL) points at a static message.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Number of scanlines per compressed block depends on compression type.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63: width + 1 below would overflow.
    if (err) {
      (*err) = "Invalid data window value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_width++;

  int data_height = exr_header->data_window[3] - exr_header->data_window[1];
  if (data_height >= std::numeric_limits<int>::max()) {
    if (err) {
      (*err) = "Invalid data height value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      (*err) = "Invalid data window value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Read offset tables.
  size_t num_blocks = 0;

  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD }
    // Guard against division by zero from a corrupted tile description.
    if ((exr_header->tile_size_x <= 0) || (exr_header->tile_size_y <= 0)) {
      if (err) {
        (*err) = "Invalid data window value.";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  // `num_blocks` is derived from untrusted header data (chunkCount or the
  // data window). Make sure the 8-byte-per-entry offset table actually fits
  // inside the buffer before reading it; the division form also avoids an
  // integer overflow in `num_blocks * 8`.
  if ((marker < head) || (marker >= head + size) ||
      (num_blocks > (size - static_cast<size_t>(marker - head)) /
                        sizeof(tinyexr::tinyexr_uint64))) {
    if (err) {
      (*err) = "Invalid offset value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      if (err) {
        (*err) = "Invalid offset value.";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] == 0) {  // offsets are unsigned; 0 marks a missing entry.
      // TODO(syoyo) Report as warning?
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK
        break;
      } else {
        if (err) {
          (*err) = "Cannot reconstruct lineOffset table.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  return DecodeChunk(exr_image, exr_header, offsets, head, size);
}
} // namespace tinyexr
// Loads an EXR file and converts it to a packed RGBA float buffer.
// HALF channels are promoted to FLOAT. `*out_rgba` is malloc()ed
// (4 * width * height floats) and owned by the caller; `*width`/`*height`
// receive the image dimensions. Returns TINYEXR_SUCCESS or a
// TINYEXR_ERROR_* code; on failure no allocation is leaked to the caller.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL || width == NULL || height == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      if (err) {
        (*err) = "Loading multipart or DeepImage is not supported yet.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);  // release partially parsed header data
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Locate RGBA channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
    // Alpha channel only: replicate A into all four output components.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if ((*out_rgba) == NULL) {
      if (err) {
        (*err) = "Memory allocation failed.\n";
      }
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    for (int i = 0; i < exr_image.width * exr_image.height; i++) {
      const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
      (*out_rgba)[4 * i + 0] = val;
      (*out_rgba)[4 * i + 1] = val;
      (*out_rgba)[4 * i + 2] = val;
      (*out_rgba)[4 * i + 3] = val;
    }
  } else {
    // Assume RGB(A). A missing color channel is an error (previously these
    // paths leaked exr_header/exr_image — see old `@todo { free exr_image }`).
    if (idxR == -1) {
      if (err) {
        (*err) = "R channel not found\n";
      }
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      if (err) {
        (*err) = "G channel not found\n";
      }
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      if (err) {
        (*err) = "B channel not found\n";
      }
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if ((*out_rgba) == NULL) {
      if (err) {
        (*err) = "Memory allocation failed.\n";
      }
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    for (int i = 0; i < exr_image.width * exr_image.height; i++) {
      (*out_rgba)[4 * i + 0] =
          reinterpret_cast<float **>(exr_image.images)[idxR][i];
      (*out_rgba)[4 * i + 1] =
          reinterpret_cast<float **>(exr_image.images)[idxG][i];
      (*out_rgba)[4 * i + 2] =
          reinterpret_cast<float **>(exr_image.images)[idxB][i];
      if (idxA != -1) {
        (*out_rgba)[4 * i + 3] =
            reinterpret_cast<float **>(exr_image.images)[idxA][i];
      } else {
        (*out_rgba)[4 * i + 3] = 1.0;  // opaque when no alpha channel
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}
// Parses an EXR header from an in-memory file image (which includes the
// 8-byte magic + version prefix) into `exr_header`. On failure `*err` may
// receive a strdup()ed message (caller frees; historically leaked).
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  // `version` must be checked too: `version->tiled` is read below.
  if (memory == NULL || exr_header == NULL || version == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
#ifdef _WIN32
      (*err) = _strdup(err_str.c_str());  // May leak
#else
      (*err) = strdup(err_str.c_str());  // May leak
#endif
    }
    // Do NOT convert a partially-parsed header: `info` may be incomplete
    // and ConvertHeader would hand the caller inconsistent fields.
    return ret;
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return TINYEXR_SUCCESS;
}
// Loads an EXR image from an in-memory file image and converts it to a
// packed RGBA float buffer (HALF channels promoted to FLOAT). `*out_rgba`
// is malloc()ed and owned by the caller. Returns TINYEXR_SUCCESS or a
// TINYEXR_ERROR_* code; header/image resources are released on all paths.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL || width == NULL || height == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);  // release partially parsed header data
    return ret;
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }

  // Locate RGBA channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // A missing color channel is an error (previously these paths leaked
  // exr_header/exr_image — see old `@todo { free exr_image }`).
  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  (*out_rgba) = reinterpret_cast<float *>(
      malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
             static_cast<size_t>(exr_image.height)));
  if ((*out_rgba) == NULL) {
    if (err) {
      (*err) = "Memory allocation failed.\n";
    }
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  for (int i = 0; i < exr_image.width * exr_image.height; i++) {
    (*out_rgba)[4 * i + 0] =
        reinterpret_cast<float **>(exr_image.images)[idxR][i];
    (*out_rgba)[4 * i + 1] =
        reinterpret_cast<float **>(exr_image.images)[idxG][i];
    (*out_rgba)[4 * i + 2] =
        reinterpret_cast<float **>(exr_image.images)[idxB][i];
    if (idxA != -1) {
      (*out_rgba)[4 * i + 3] =
          reinterpret_cast<float **>(exr_image.images)[idxA][i];
    } else {
      (*out_rgba)[4 * i + 3] = 1.0;  // opaque when no alpha channel
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and decodes it via
// LoadEXRImageFromMemory. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*
// code. The whole file is buffered (@todo { use mmap }).
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL || exr_header == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size. ftell() returns -1 on error; treat that and an empty
  // file as invalid (the old code cast -1 to SIZE_MAX and indexed buf[0]
  // of an empty vector — both undefined behavior).
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize < 1) {
    fclose(fp);
    if (err) {
      (*err) = "File size is zero.";
    }
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    // The old code only assert()ed here, which is compiled out under
    // NDEBUG; a short read must be a hard error.
    if (ret != filesize) {
      if (err) {
        (*err) = "Cannot read file.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes an EXR image from an in-memory file image. `exr_header` must be
// the result of a successful ParseEXRHeaderFrom*() call (header_len > 0).
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  // `exr_header` must be NULL-checked before `header_len` is read below.
  if (exr_image == NULL || exr_header == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    if (err) {
      (*err) = "EXRHeader is not initialized.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // `header_len` comes from parsed (untrusted) data: the chunk marker
  // computed from it must still lie inside the buffer.
  if (static_cast<size_t>(exr_header->header_len) + 8 >= size) {
    if (err) {
      (*err) = "Invalid data.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
// Serializes `exr_image` to an EXR file image in memory. `*memory_out`
// receives a malloc()ed buffer owned by the caller; the return value is
// its size in bytes, or 0 on error (with `*err` set when non-NULL).
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
                            const EXRHeader *exr_header,
                            unsigned char **memory_out, const char **err) {
  // `exr_header` must be NULL-checked before its fields are read, and at
  // least one channel is required (data.at(0) below throws otherwise).
  if (exr_image == NULL || memory_out == NULL || exr_header == NULL ||
      exr_header->compression_type < 0 || exr_header->num_channels <= 0) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return 0;  // @fixme
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    if (err) {
      (*err) = "PIZ compression is not supported in this build.";
    }
    return 0;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    if (err) {
      (*err) = "ZFP compression is not supported in this build.";
    }
    return 0;
  }
#endif

#if TINYEXR_USE_ZFP
  // ZFP only encodes 32-bit floats.
  for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
    if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
      if (err) {
        (*err) = "Pixel type must be FLOAT for ZFP compression.";
      }
      return 0;
    }
  }
#endif

  std::vector<unsigned char> memory;

  // Magic number.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    memory.insert(memory.end(), header, header + 4);
  }

  // Version, scanline.
  {
    char marker[] = {2, 0, 0, 0};
    /* @todo
    if (exr_header->tiled) {
      marker[1] |= 0x2;
    }
    if (exr_header->long_name) {
      marker[1] |= 0x4;
    }
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    if (exr_header->multipart) {
      marker[1] |= 0x10;
    }
    */
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Scanlines per compressed block, by compression type.
  int num_scanlines = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }

  // Write attributes.
  std::vector<tinyexr::ChannelInfo> channels;
  {
    std::vector<unsigned char> data;

    for (int c = 0; c < exr_header->num_channels; c++) {
      tinyexr::ChannelInfo info;
      info.p_linear = 0;
      info.pixel_type = exr_header->requested_pixel_types[c];
      info.x_sampling = 1;
      info.y_sampling = 1;
      info.name = std::string(exr_header->channels[c].name);
      channels.push_back(info);
    }

    tinyexr::WriteChannelInfo(data, channels);

    tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                    static_cast<int>(data.size()));
  }

  {
    int comp = exr_header->compression_type;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
    tinyexr::WriteAttributeToMemory(
        &memory, "compression", "compression",
        reinterpret_cast<const unsigned char *>(&comp), 1);
  }

  {
    // Data and display windows are identical: origin at (0, 0).
    int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
    tinyexr::WriteAttributeToMemory(
        &memory, "dataWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
    tinyexr::WriteAttributeToMemory(
        &memory, "displayWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
  }

  {
    unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
    tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                                    &line_order, 1);
  }

  {
    float aspectRatio = 1.0f;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
    tinyexr::WriteAttributeToMemory(
        &memory, "pixelAspectRatio", "float",
        reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1]));
    tinyexr::WriteAttributeToMemory(
        &memory, "screenWindowCenter", "v2f",
        reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
  }

  {
    float w = static_cast<float>(exr_image->width);
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
    tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                                    reinterpret_cast<const unsigned char *>(&w),
                                    sizeof(float));
  }

  // Custom attributes
  if (exr_header->num_custom_attributes > 0) {
    for (int i = 0; i < exr_header->num_custom_attributes; i++) {
      tinyexr::WriteAttributeToMemory(
          &memory, exr_header->custom_attributes[i].name,
          exr_header->custom_attributes[i].type,
          reinterpret_cast<const unsigned char *>(
              exr_header->custom_attributes[i].value),
          exr_header->custom_attributes[i].size);
    }
  }

  {  // end of header
    unsigned char e = 0;
    memory.push_back(e);
  }

  int num_blocks = exr_image->height / num_scanlines;
  if (num_blocks * num_scanlines < exr_image->height) {
    num_blocks++;
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));

  size_t headerSize = memory.size();
  tinyexr::tinyexr_uint64 offset =
      headerSize +
      static_cast<size_t>(num_blocks) *
          sizeof(
              tinyexr::tinyexr_int64);  // sizeof(header) + sizeof(offsetTable)

  std::vector<unsigned char> data;

  std::vector<std::vector<unsigned char> > data_list(
      static_cast<size_t>(num_blocks));
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  // Per-pixel byte size and per-channel byte offset within a scanline.
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    channel_offset_list[c] = channel_offset;
    if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
      pixel_data_size += sizeof(unsigned short);
      channel_offset += sizeof(unsigned short);
    } else if (exr_header->requested_pixel_types[c] ==
               TINYEXR_PIXELTYPE_FLOAT) {
      pixel_data_size += sizeof(float);
      channel_offset += sizeof(float);
    } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
      pixel_data_size += sizeof(unsigned int);
      channel_offset += sizeof(unsigned int);
    } else {
      assert(0);
    }
  }

#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
  }
#endif

// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_blocks; i++) {
    size_t ii = static_cast<size_t>(i);
    int start_y = num_scanlines * i;
    int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
    int h = endY - start_y;

    // Gather this block's pixel data into `buf`, channel-planar per
    // scanline, converting each channel to its requested pixel type.
    std::vector<unsigned char> buf(
        static_cast<size_t>(exr_image->width * h * pixel_data_size));

    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
      if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP16 h16;
              h16.u = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP32 f32 = half_to_float(h16);

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));

              // Assume increasing Y
              float *line_ptr = reinterpret_cast<float *>(&buf.at(
                  static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                  channel_offset_list[c] *
                      static_cast<size_t>(exr_image->width)));
              line_ptr[x] = f32.f;
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_HALF) {
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exr_image->width; x++) {
              unsigned short val = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap2(&val);

              // Assume increasing Y
              unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                  &buf.at(static_cast<size_t>(pixel_data_size * y *
                                              exr_image->width) +
                          channel_offset_list[c] *
                              static_cast<size_t>(exr_image->width)));
              line_ptr[x] = val;
            }
          }
        } else {
          assert(0);
        }

      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP32 f32;
              f32.f = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP16 h16;
              h16 = float_to_half_full(f32);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

              // Assume increasing Y
              unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                  &buf.at(static_cast<size_t>(pixel_data_size * y *
                                              exr_image->width) +
                          channel_offset_list[c] *
                              static_cast<size_t>(exr_image->width)));
              line_ptr[x] = h16.u;
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_FLOAT) {
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exr_image->width; x++) {
              float val = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

              // Assume increasing Y
              float *line_ptr = reinterpret_cast<float *>(&buf.at(
                  static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                  channel_offset_list[c] *
                      static_cast<size_t>(exr_image->width)));
              line_ptr[x] = val;
            }
          }
        } else {
          assert(0);
        }
      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        for (int y = 0; y < h; y++) {
          for (int x = 0; x < exr_image->width; x++) {
            unsigned int val = reinterpret_cast<unsigned int **>(
                exr_image->images)[c][(y + start_y) * exr_image->width + x];

            tinyexr::swap4(&val);

            // Assume increasing Y
            unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            line_ptr[x] = val;
          }
        }
      }
    }

    if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(uncompressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(buf.size());
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), buf.begin(),
                           buf.begin() + data_len);

    } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
               (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
      std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
          static_cast<unsigned long>(buf.size())));
#else
      std::vector<unsigned char> block(
          compressBound(static_cast<uLong>(buf.size())));
#endif
      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressZip(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
      // (buf.size() * 3) / 2 would be enough.
      std::vector<unsigned char> block((buf.size() * 3) / 2);

      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressRle(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
      unsigned int bufLen =
          1024 + static_cast<unsigned int>(
                     1.2 * static_cast<unsigned int>(
                               buf.size()));  // @fixme { compute good bound. }
      std::vector<unsigned char> block(bufLen);
      unsigned int outSize = static_cast<unsigned int>(block.size());

      CompressPiz(&block.at(0), &outSize,
                  reinterpret_cast<const unsigned char *>(&buf.at(0)),
                  buf.size(), channels, exr_image->width, h);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

#else
      assert(0);
#endif
    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
      std::vector<unsigned char> block;
      unsigned int outSize;

      tinyexr::CompressZfp(
          &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
          exr_image->width, h, exr_header->num_channels, zfp_compression_param);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

#else
      assert(0);
#endif
    } else {
      assert(0);
    }
  }  // omp parallel

  // Assemble the offset table and concatenate per-block data (serial; order
  // matters for the running `offset`).
  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    data.insert(data.end(), data_list[i].begin(), data_list[i].end());

    offsets[i] = offset;
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
    offset += data_list[i].size();
  }

  {
    memory.insert(
        memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
        reinterpret_cast<unsigned char *>(&offsets.at(0)) +
            sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
  }

  { memory.insert(memory.end(), data.begin(), data.end()); }

  assert(memory.size() > 0);

  (*memory_out) = static_cast<unsigned char *>(malloc(memory.size()));
  // The old code memcpy()ed into an unchecked malloc() result.
  if ((*memory_out) == NULL) {
    if (err) {
      (*err) = "Cannot allocate memory.";
    }
    return 0;
  }
  memcpy((*memory_out), &memory.at(0), memory.size());

  return memory.size();  // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
return 0;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot write a file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if ((mem_size > 0) && mem) {
fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((!errcode) || (!fp)) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
if (err) {
(*err) = "File size is zero.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
if (err) {
(*err) = "Invalid magic number.";
}
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
if (err) {
(*err) = "Unsupported version or scanline.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "Unsupported compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
if (err) {
(*err) = "Failed to parse channel info.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
if (err) {
(*err) = "Invalid channels format.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
if (err) {
(*err) = "Unsupported format.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
}
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen,
data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) {
return false;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return false;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui = *reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
f16.u = *reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f = *reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
// Reset an EXRImage to a known empty state so it is safe to pass to the
// loaders and to FreeEXRImage(). A NULL pointer is silently ignored.
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  // No pixel data attached yet (neither scanline nor tiled storage).
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->num_tiles = 0;
}
// Zero-initialize an EXRHeader so it is safe to pass to the parsing
// functions and to FreeEXRHeader(). A NULL pointer is silently ignored.
void InitEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return;
  }

  memset(exr_header, 0, sizeof(EXRHeader));
}
// Release all heap memory owned by an EXRHeader previously populated by the
// Parse/Convert functions. The EXRHeader struct itself is NOT freed (it may
// live on the stack). Freed pointers are reset to NULL so a second call is
// harmless. Returns TINYEXR_ERROR_INVALID_ARGUMENT for NULL input.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // free(NULL) is a no-op, so the previous `if (ptr)` guards were redundant.
  free(exr_header->channels);
  exr_header->channels = NULL;

  free(exr_header->pixel_types);
  exr_header->pixel_types = NULL;

  free(exr_header->requested_pixel_types);
  exr_header->requested_pixel_types = NULL;

  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
    exr_header->custom_attributes[i].value = NULL;
  }
  // Prevent a double free of attribute values on a repeated call.
  exr_header->num_custom_attributes = 0;

  return TINYEXR_SUCCESS;
}
// Release all heap memory owned by an EXRImage (scanline channel buffers,
// or per-tile channel buffers plus the tile array). The EXRImage struct
// itself is NOT freed. Returns TINYEXR_ERROR_INVALID_ARGUMENT for NULL input.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanline layout: one buffer per channel. free(NULL) is a no-op.
  if (exr_image->images) {
    for (int i = 0; i < exr_image->num_channels; i++) {
      free(exr_image->images[i]);
    }
    free(exr_image->images);
    exr_image->images = NULL;
  }

  // Tiled layout: per-tile channel buffers, then the tile array itself.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      if (exr_image->tiles[tid].images) {
        for (int i = 0; i < exr_image->num_channels; i++) {
          free(exr_image->tiles[tid].images[i]);
        }
        free(exr_image->tiles[tid].images);
      }
    }
    // BUGFIX: the tile array allocated by the loader was never freed here.
    free(exr_image->tiles);
    exr_image->tiles = NULL;
  }

  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and parses its (single-part) EXR header
// into `exr_header`. `exr_version` must come from ParseEXRVersionFrom*().
// On failure `*err` (if non-NULL) points to a static message.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute file size by seeking to the end.
  fseek(fp, 0, SEEK_END);
  long pos = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  // BUGFIX: an empty file (or a failed ftell, which returns -1) previously
  // produced an empty vector whose &buf[0] access is undefined behavior.
  if (pos <= 0) {
    fclose(fp);
    if (err) {
      (*err) = "Invalid file.";
    }
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(pos);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);

    if (ret != filesize) {
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses all part headers of a multipart EXR image from memory.
// On success, `*exr_headers` receives a malloc'ed array of `*num_headers`
// malloc'ed EXRHeader pointers (caller frees with FreeEXRHeader + free).
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  // Headers are concatenated; an empty header (single '\0') terminates them.
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      if (err) {
#ifdef _WIN32
        (*err) = _strdup(err_str.c_str());  // may leak
#else
        (*err) = strdup(err_str.c_str());  // may leak
#endif
      }
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      if (err) {
        (*err) = "`chunkCount' attribute is not found in the header.";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // Move to the next header.
    // BUGFIX: the remaining byte count handed to ParseEXRHeader
    // (`marker_size`) must shrink together with `marker`; previously the
    // unused copy `size` was decremented instead, so a stale size was
    // passed on every iteration. Also guard against underflow.
    if (info.header_len > marker_size) {
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += info.header_len;
    marker_size -= info.header_len;
  }

  // Allocate memory for EXRHeader and create an array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));

    ConvertHeader(exr_header, infos[i]);

    // Transfer `tiled` from the version header.
    exr_header->tiled = exr_version->tiled;

    (*exr_headers)[i] = exr_header;
  }
  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}
// File-based wrapper: reads `filename` fully into memory and delegates to
// ParseEXRMultipartHeaderFromMemory().
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute file size by seeking to the end.
  fseek(fp, 0, SEEK_END);
  long pos = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  // BUGFIX: an empty file (or failed ftell, which returns -1) previously
  // produced an empty vector whose &buf[0] access is undefined behavior.
  if (pos <= 0) {
    fclose(fp);
    if (err) {
      (*err) = "Invalid file.";
    }
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(pos);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);

    if (ret != filesize) {
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR magic number + version field at the start of
// `memory` into `version`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic number check: 0x76 0x2f 0x31 0x01 ("v/1\x01").
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse the version field: byte 0 must be 2; byte 1 carries feature flags.
  // (BUGFIX: a redundant `version == NULL` re-check that sat AFTER the struct
  // had already been written to was dead code and has been removed; the
  // pointer is validated at the top of the function.)
  {
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }
    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Reads the first kEXRVersionSize bytes of `filename` and parses the EXR
// version header via ParseEXRVersionFromMemory().
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute file size by seeking to the end.
  fseek(fp, 0, SEEK_END);
  size_t file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    fclose(fp);  // BUGFIX: this early-return path leaked the FILE handle.
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes every part of a multipart EXR image that resides in memory.
// `exr_images` must point to an array of `num_parts` initialized EXRImage
// structs, and `exr_headers` to the headers obtained from
// ParseEXRMultipartHeaderFromMemory(). Returns TINYEXR_SUCCESS or a
// TINYEXR_ERROR_* code; on argument errors `*err` (if non-NULL) is set to a
// static message.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Compute the total header size; each header must have been fully parsed
  // already (header_len > 0) so the chunk-offset tables can be located.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      if (err) {
        (*err) = "EXRHeader is not initialized.";
      }
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In a multipart image, there is a 'part number' before the chunk data:
  //   4 byte : part number
  //   4+     : chunk
  //
  // NOTE 2:
  // The EXR spec says 'part number' is 'unsigned long' but it is actually
  // 'unsigned int (4 bytes)' in the OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load one chunk offset table per part.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      // Reject offsets that point outside the buffer.
      if (offset >= size) {
        if (err) {
          (*err) = "Invalid offset size.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode each part's image data.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check that the stored 'part number' is identical to 'i'.
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
      unsigned int part_no;
      memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
      tinyexr::swap4(&part_no);

      if (part_no != i) {
        assert(0);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }

    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
                                   memory, size);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}
// File-based wrapper: reads `filename` fully into memory and delegates to
// LoadEXRMultipartImageFromMemory().
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUGFIX: `filename` was not NULL-checked (the sibling loaders check it).
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute file size by seeking to the end.
  fseek(fp, 0, SEEK_END);
  long pos = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  if (pos <= 0) {
    fclose(fp);
    if (err) {
      (*err) = "Invalid file.";
    }
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(pos);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);

    // BUGFIX: the read result was previously only assert()ed and then
    // discarded with (void)ret, silently accepting truncated reads in
    // release builds.
    if (ret != filesize) {
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Saves an interleaved float image (1, 3 or 4 components per pixel) to an
// EXR file. `save_as_fp16 > 0` stores pixels as half floats, otherwise as
// 32-bit floats. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Assume at least 16x16 pixels.
  if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
  if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;

  EXRHeader header;
  InitEXRHeader(&header);

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split interleaved RGB(A)RGB(A)... into per-channel planes.
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Channels are stored in (A)BGR order, since most EXR viewers expect it.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));

  // Channel names in the same (A)BGR order as image_ptr above; the three
  // duplicated strncpy blocks were consolidated into one loop.
  const char *channel_names[4] = {"A", 0, 0, 0};
  if (components == 4) {
    channel_names[0] = "A";
    channel_names[1] = "B";
    channel_names[2] = "G";
    channel_names[3] = "R";
  } else if (components == 3) {
    channel_names[0] = "B";
    channel_names[1] = "G";
    channel_names[2] = "R";
  }  // components == 1 keeps the single "A" channel.

  for (int c = 0; c < header.num_channels; c++) {
#ifdef _MSC_VER
    strncpy_s(header.channels[c].name, channel_names[c], 255);
#else
    strncpy(header.channels[c].name, channel_names[c], 255);
#endif
    // Names are single characters; terminate explicitly.
    header.channels[c].name[strlen(channel_names[c])] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image
    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  const char *err = NULL;  // BUGFIX: was uninitialized.
  int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);

  // BUGFIX: the header allocations were previously leaked when
  // SaveEXRImageToFile() failed (early return skipped the frees); free them
  // on every path. free(NULL) is a no-op.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
struct_axpy.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Structured axpy routine
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
 * hypre_StructAxpy
 *
 * Computes y := alpha*x + y over every box of the structured grid of y.
 * x and y are assumed to live on compatible grids/data spaces (same box
 * indexing) -- NOTE(review): this precondition is not checked here; confirm
 * against callers. Returns the global hypre error flag.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructAxpy( HYPRE_Complex alpha,
                  hypre_StructVector *x,
                  hypre_StructVector *y )
{
   hypre_Box       *x_data_box;   /* data extents of x for the current box */
   hypre_Box       *y_data_box;   /* data extents of y for the current box */

   HYPRE_Int        xi;           /* linear index into xp inside the box loop */
   HYPRE_Int        yi;           /* linear index into yp inside the box loop */

   HYPRE_Complex   *xp;           /* raw data of x for the current box */
   HYPRE_Complex   *yp;           /* raw data of y for the current box */

   hypre_BoxArray  *boxes;
   hypre_Box       *box;
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      unit_stride;

   HYPRE_Int        i;

   /* Iterate with stride 1 in every dimension. */
   hypre_SetIndex(unit_stride, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
   hypre_ForBoxI(i, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);

      x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);

      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);

      hypre_BoxGetSize(box, loop_size);

      /* The BoxLoop2 macros expand to a nested loop over the box, mapping
       * each grid point to linear indices xi/yi in the two data spaces. */
      hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size,
                          x_data_box, start, unit_stride, xi,
                          y_data_box, start, unit_stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,yi) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop2For(xi, yi)
      {
         yp[yi] += alpha * xp[xi];
      }
      hypre_BoxLoop2End(xi, yi);
   }

   return hypre_error_flag;
}
|
a.34.1.c | /* { dg-do compile } */
/* Dummy worker; the body is intentionally empty -- this testsuite file only
   checks that the OpenMP nesting in good_nesting() compiles. */
void
work (int i, int j)
{
}
/* Demonstrates legal OpenMP nesting: a worksharing `for` inside a parallel
   region, whose body opens a NEW nested parallel region before the inner
   worksharing `for`.  (Two `for` constructs bound to the SAME parallel
   region would be non-conforming; the inner `parallel` makes this valid.) */
void
good_nesting (int n)
{
  int i, j;
#pragma omp parallel default(shared)
  {
#pragma omp for
    for (i = 0; i < n; i++)
      {
#pragma omp parallel shared(i, n)
        {
#pragma omp for
          for (j = 0; j < n; j++)
            work (i, j);
        }
      }
  }
}
|
residualbased_simple_steady_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Michael Andre, https://github.com/msandre
//
#if !defined(KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME )
#define KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"
namespace Kratos {
///@name Kratos Classes
///@{
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedSimpleSteadyScheme : public Scheme<TSparseSpace, TDenseSpace> {
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedSimpleSteadyScheme);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef Element::GeometryType GeometryType;
///@}
///@name Life Cycle
///@{
  /// Constructor without a turbulence model.
  /// @param VelocityRelaxationFactor under-relaxation factor for velocity dofs
  /// @param PressureRelaxationFactor under-relaxation factor for pressure dofs
  /// @param DomainSize spatial dimension; the nodal block size used by the
  ///        rotation tool is DomainSize+1 (velocities + pressure)
  ResidualBasedSimpleSteadyScheme(double VelocityRelaxationFactor,
                                  double PressureRelaxationFactor,
                                  unsigned int DomainSize)
      : Scheme<TSparseSpace, TDenseSpace>(),
        mVelocityRelaxationFactor(VelocityRelaxationFactor),
        mPressureRelaxationFactor(PressureRelaxationFactor),
        mRotationTool(DomainSize,DomainSize+1,SLIP)
  {}
  /// Constructor with a turbulence model process, executed once per
  /// non-linear iteration (see InitializeNonLinIteration).
  ResidualBasedSimpleSteadyScheme(
      double VelocityRelaxationFactor,
      double PressureRelaxationFactor,
      unsigned int DomainSize,
      Process::Pointer pTurbulenceModel)
      : Scheme<TSparseSpace, TDenseSpace>(),
        mVelocityRelaxationFactor(VelocityRelaxationFactor),
        mPressureRelaxationFactor(PressureRelaxationFactor),
        mRotationTool(DomainSize,DomainSize+1,SLIP),
        mpTurbulenceModel(pTurbulenceModel)
  {}
  /// Destructor; all members clean up themselves.
  ~ResidualBasedSimpleSteadyScheme() override {}
///@}
///@name Operators
///@{
  /// Returns the current velocity under-relaxation factor.
  double GetVelocityRelaxationFactor() const
  {
    return mVelocityRelaxationFactor;
  }
  /// Sets the velocity under-relaxation factor.
  void SetVelocityRelaxationFactor(double factor)
  {
    mVelocityRelaxationFactor = factor;
  }
  /// Returns the current pressure under-relaxation factor.
  double GetPressureRelaxationFactor() const
  {
    return mPressureRelaxationFactor;
  }
  /// Sets the pressure under-relaxation factor.
  void SetPressureRelaxationFactor(double factor)
  {
    mPressureRelaxationFactor = factor;
  }
  /// Applies the solution increment rDx to the dofs.
  /// Velocities are rotated into the slip-aligned local frame before the
  /// dof update and rotated back afterwards so slip BCs stay consistent.
  void Update(ModelPart& rModelPart,
              DofsArrayType& rDofSet,
              TSystemMatrixType& rA,
              TSystemVectorType& rDx,
              TSystemVectorType& rb) override
  {
    KRATOS_TRY;

    mRotationTool.RotateVelocities(rModelPart);

    mpDofUpdater->UpdateDofs(rDofSet,rDx);

    mRotationTool.RecoverVelocities(rModelPart);

    KRATOS_CATCH("");
  }
  /// Assembles the local LHS/RHS for one element, adds the steady velocity
  /// contribution, applies nodal relaxation, and rotates/constrains the
  /// system for slip boundaries. Call order matters: the velocity
  /// contribution must be merged before relaxation and rotation.
  void CalculateSystemContributions(
      Element::Pointer rCurrentElement,
      LocalSystemMatrixType& LHS_Contribution,
      LocalSystemVectorType& RHS_Contribution,
      Element::EquationIdVectorType& EquationId,
      ProcessInfo& CurrentProcessInfo) override
  {
    KRATOS_TRY;

    rCurrentElement->InitializeNonLinearIteration(CurrentProcessInfo);
    rCurrentElement->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    Matrix SteadyLHS;
    rCurrentElement->CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution, CurrentProcessInfo);
    rCurrentElement->EquationIdVector(EquationId, CurrentProcessInfo);

    // SteadyLHS may come back empty; only merge it if it was filled.
    if (SteadyLHS.size1() != 0)
      noalias(LHS_Contribution) += SteadyLHS;

    AddRelaxation(rCurrentElement->GetGeometry(), LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    // apply slip condition
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());

    KRATOS_CATCH("");
  }
  /// Condition counterpart of CalculateSystemContributions: assembles the
  /// local system for one condition, merges the steady velocity
  /// contribution, relaxes, and applies slip rotation/constraints.
  void Condition_CalculateSystemContributions(
      Condition::Pointer rCurrentCondition,
      LocalSystemMatrixType& LHS_Contribution,
      LocalSystemVectorType& RHS_Contribution,
      Condition::EquationIdVectorType& EquationId,
      ProcessInfo& CurrentProcessInfo) override
  {
    KRATOS_TRY;

    rCurrentCondition->InitializeNonLinearIteration(CurrentProcessInfo);
    rCurrentCondition->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    Matrix SteadyLHS;
    rCurrentCondition->CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution, CurrentProcessInfo);
    rCurrentCondition->EquationIdVector(EquationId, CurrentProcessInfo);

    // SteadyLHS may come back empty; only merge it if it was filled.
    if (SteadyLHS.size1() != 0)
      noalias(LHS_Contribution) += SteadyLHS;

    AddRelaxation(rCurrentCondition->GetGeometry(), LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    // apply slip condition
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());

    KRATOS_CATCH("");
  }
  /// RHS-only variant for elements: delegates to
  /// CalculateSystemContributions and discards the local LHS.
  void Calculate_RHS_Contribution(
      Element::Pointer rCurrentElement,
      LocalSystemVectorType& rRHS_Contribution,
      Element::EquationIdVectorType& rEquationId,
      ProcessInfo& rCurrentProcessInfo) override
  {
    KRATOS_TRY;

    Matrix LHS_Contribution;
    CalculateSystemContributions(rCurrentElement,LHS_Contribution,
                                 rRHS_Contribution,rEquationId,rCurrentProcessInfo);

    KRATOS_CATCH("");
  }
  /// RHS-only variant for conditions: delegates to
  /// Condition_CalculateSystemContributions and discards the local LHS.
  void Condition_Calculate_RHS_Contribution(
      Condition::Pointer rCurrentCondition,
      LocalSystemVectorType& rRHS_Contribution,
      Element::EquationIdVectorType& rEquationId,
      ProcessInfo& rCurrentProcessInfo) override
  {
    KRATOS_TRY;

    Matrix LHS_Contribution;
    Condition_CalculateSystemContributions(rCurrentCondition,LHS_Contribution,
                                           rRHS_Contribution,rEquationId,
                                           rCurrentProcessInfo);

    KRATOS_CATCH("");
  }
  /// Per-iteration setup: zeroes NODAL_AREA on all nodes, lets each element
  /// accumulate its NODAL_AREA contribution (in parallel), assembles the
  /// result across MPI partitions, then runs the turbulence model if one
  /// was supplied at construction.
  void InitializeNonLinIteration(ModelPart& rModelPart,
                                 TSystemMatrixType& rA,
                                 TSystemVectorType& rDx,
                                 TSystemVectorType& rb) override
  {
    KRATOS_TRY;

    for (typename ModelPart::NodesContainerType::iterator itNode = rModelPart.NodesBegin();
         itNode != rModelPart.NodesEnd(); itNode++)
      itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;

    double output;  // scratch value required by Element::Calculate; unused
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    const int number_of_elements = rModelPart.NumberOfElements();
#pragma omp parallel for private(output)
    for (int i = 0; i < number_of_elements; i++) {
      ModelPart::ElementsContainerType::iterator it_elem = rModelPart.ElementsBegin() + i;
      it_elem->Calculate(NODAL_AREA, output, CurrentProcessInfo);
    }

    // Sum nodal-area contributions across partitions.
    rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);

    if (mpTurbulenceModel != 0) // If not null
      mpTurbulenceModel->Execute();

    KRATOS_CATCH("");
  }
  /// Per-iteration wrap-up: when OSS is active (OSS_SWITCH == 1), computes
  /// the orthogonal-subscale projections ADVPROJ/DIVPROJ -- zero, accumulate
  /// element-wise, assemble across partitions, then divide by NODAL_AREA.
  void FinalizeNonLinIteration(ModelPart &rModelPart,
                               TSystemMatrixType &rA,
                               TSystemVectorType &rDx,
                               TSystemVectorType &rb) override
  {
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //if orthogonal subscales are computed
    if (CurrentProcessInfo[OSS_SWITCH] == 1.0) {
      KRATOS_INFO_IF("ResidualBasedSimpleSteadyScheme", rModelPart.GetCommunicator().MyPID() == 0)
          << "Computing OSS projections" << std::endl;

      // Reset projection variables and the weighting area on every node.
      const int number_of_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
      for (int i = 0; i < number_of_nodes; i++) {
        ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i;
        noalias(it_node->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
        it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0;
        it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
      }

      // Each element adds its projection contribution to its nodes.
      const int number_of_elements = rModelPart.NumberOfElements();
      array_1d<double, 3 > output;
#pragma omp parallel for private(output)
      for (int i = 0; i < number_of_elements; i++) {
        ModelPart::ElementIterator it_elem = rModelPart.ElementsBegin() + i;
        it_elem->Calculate(ADVPROJ,output,CurrentProcessInfo);
      }

      rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
      rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
      rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

      // Average: divide accumulated projections by the nodal area
      // (guarding against a zero area to avoid division by zero).
#pragma omp parallel for
      for (int i = 0; i < number_of_nodes; i++) {
        ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i;
        if (it_node->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
          it_node->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
        const double area_inverse = 1.0 / it_node->FastGetSolutionStepValue(NODAL_AREA);
        it_node->FastGetSolutionStepValue(ADVPROJ) *= area_inverse;
        it_node->FastGetSolutionStepValue(DIVPROJ) *= area_inverse;
      }
    }
  }
  /// End-of-step tasks: recomputes nodal REACTION by accumulating the
  /// negative elemental RHS (velocity dofs only; the pressure dof of each
  /// nodal block is skipped), assembles reactions across partitions, then
  /// calls the base scheme's FinalizeSolutionStep.
  void FinalizeSolutionStep(ModelPart &rModelPart,
                            TSystemMatrixType &rA,
                            TSystemVectorType &rDx,
                            TSystemVectorType &rb) override
  {
    LocalSystemVectorType RHS_Contribution;
    LocalSystemMatrixType LHS_Contribution;
    ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();

    // Zero the reactions before accumulation.
    for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin();
         itNode != rModelPart.NodesEnd(); ++itNode)
    {
      itNode->FastGetSolutionStepValue(REACTION_X,0) = 0.0;
      itNode->FastGetSolutionStepValue(REACTION_Y,0) = 0.0;
      itNode->FastGetSolutionStepValue(REACTION_Z,0) = 0.0;
    }

    for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin();
         itElem != rModelPart.Elements().ptr_end(); ++itElem)
    {
      (*itElem)->InitializeNonLinearIteration(rCurrentProcessInfo);
      (*itElem)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);

      Matrix SteadyLHS;
      (*itElem)->CalculateLocalVelocityContribution(SteadyLHS,RHS_Contribution,rCurrentProcessInfo);

      GeometryType& rGeom = (*itElem)->GetGeometry();
      unsigned int NumNodes = rGeom.PointsNumber();
      unsigned int Dimension = rGeom.WorkingSpaceDimension();
      unsigned int index = 0;

      // Local RHS layout per node: [vx, vy, (vz,) p] -- reactions take the
      // negative velocity residuals; the pressure entry is skipped.
      for (unsigned int i = 0; i < NumNodes; i++)
      {
        rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
        rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
        if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
        index++; // skip pressure dof
      }
    }

    rModelPart.GetCommunicator().AssembleCurrentData(REACTION);

    Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
  }
///@}
protected:
///@name Protected Operators
///@{
// Adds pseudo-time relaxation to an element-local system: each velocity
// diagonal gets lumped_mass / (mVelocityRelaxationFactor * local_dt), and
// each pressure diagonal is scaled by 1 / mPressureRelaxationFactor.
void AddRelaxation(const GeometryType& rGeometry,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
ProcessInfo& CurrentProcessInfo)
{
// Nothing to do for elements that assembled no LHS.
if (LHS_Contribution.size1() == 0)
return;
const unsigned int NumNodes = rGeometry.PointsNumber();
const unsigned int Dimension = rGeometry.WorkingSpaceDimension();
Matrix Mass;
this->CalculateLumpedMassMatrix(rGeometry,Mass);
unsigned int DofIndex = 0;
for (unsigned int iNode = 0; iNode < NumNodes; iNode++)
{
const array_1d<double, 3>& rVel = rGeometry[iNode].FastGetSolutionStepValue(VELOCITY,0);
const double Area = rGeometry[iNode].FastGetSolutionStepValue(NODAL_AREA,0);
double VelNorm = 0.0;
for (unsigned int d = 0; d < Dimension; ++d)
VelNorm += rVel[d] * rVel[d];
VelNorm = sqrt(VelNorm);
// CFL-like local time step: nodal length scale / velocity magnitude,
// with a fallback of 1.0 for stagnant nodes to avoid division by zero.
double LocalDt;
if (VelNorm != 0.0)
LocalDt = pow(Area, 1.0 / double(Dimension)) / VelNorm;
else
LocalDt = 1.0;
// Scale the (diagonal) lumped-mass entries of the velocity dofs.
for (unsigned int i = 0; i < Dimension; i++)
{
Mass(DofIndex,DofIndex) *= 1.0 / (mVelocityRelaxationFactor * LocalDt);
DofIndex++;
}
DofIndex++; // pressure dof
}
noalias(LHS_Contribution) += Mass;
// pressure relaxation
for (unsigned int iNode = 0; iNode < NumNodes; iNode++)
{
// Per-node dof layout is [v_0..v_{Dim-1}, p], so the pressure entry
// sits at offset Dimension inside each block.
unsigned int BlockIndex = iNode * (Dimension + 1);
LHS_Contribution(BlockIndex+Dimension,BlockIndex+Dimension) *= 1.0 / mPressureRelaxationFactor;
}
}
// Builds a diagonal lumped mass matrix for the element: each velocity dof
// of node i gets (element size / node count) * nodal density; pressure
// rows/columns are left zero.
void CalculateLumpedMassMatrix(
const GeometryType& rGeometry,
LocalSystemMatrixType& rLumpedMass) const
{
const unsigned int dimension = rGeometry.WorkingSpaceDimension();
const unsigned int number_of_nodes = rGeometry.PointsNumber();
const unsigned int nodal_block_size = dimension + 1;
const unsigned int local_size = nodal_block_size * number_of_nodes;
// Resize without preserving contents; the matrix is fully re-zeroed below.
if (rLumpedMass.size1() != local_size) {
rLumpedMass.resize(local_size,local_size,false);
}
noalias(rLumpedMass) = ZeroMatrix(local_size,local_size);
// Equal-split lumping: each node owns 1/N of the element's measure.
const double size_fraction = rGeometry.DomainSize() / number_of_nodes;
for (unsigned int i = 0; i < number_of_nodes; i++){
const unsigned int node_block = i*nodal_block_size;
const double lumped_mass = size_fraction * rGeometry[i].FastGetSolutionStepValue(DENSITY);
for (unsigned int d = 0; d < dimension; d++) {
rLumpedMass(node_block+d,node_block+d) = lumped_mass;
}
}
}
///@}
private:
///@name Member Variables
///@{
double mVelocityRelaxationFactor;
double mPressureRelaxationFactor;
CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;
Process::Pointer mpTurbulenceModel;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
///@}
};
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME defined */
|
qla_bench.c | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <qla.h>
#ifdef _OPENMP
# include <omp.h>
# define __USE_GNU
# include <sched.h>
#endif
#if QLA_Precision == 'F'
#define REALBYTES 4
#else
#define REALBYTES 8
#endif
#define NC QLA_Nc
#define myalloc(type, n) (type *) aligned_malloc(n*sizeof(type))
#define ALIGN 64
// Marker for the start of a timed benchmark slice. The empty asm appears
// intended as a compiler barrier / profiling anchor; NOTE(review): without
// a "memory" clobber it is a weak barrier — confirm the intent.
static void start_slice(){
__asm__ __volatile__ ("");
}
// Marker for the end of a timed benchmark slice (see start_slice).
static void end_slice(){
__asm__ __volatile__ ("");
}
void *
aligned_malloc(size_t n)
{
  /* Return a pointer aligned to ALIGN bytes inside a freshly malloc'd
   * block of n+ALIGN bytes.  The original malloc pointer is discarded,
   * so the result can never be free()d: allocations are intentionally
   * leaked (one-shot benchmark allocations).
   * Fix: pointer<->integer round-trips use uintptr_t, the type the C
   * standard provides for this, instead of size_t. */
  uintptr_t m = (uintptr_t) malloc(n + ALIGN);
  uintptr_t r = m % ALIGN;
  /* If malloc failed, m is 0 and r is 0, so NULL is returned unchanged. */
  if(r) m += ALIGN - r;
  return (void *)m;
}
double
dtime(void)
{
  /* Wall-clock time in seconds (as a double).
   * Fix: the non-OpenMP branch previously returned
   * CLOCKS_PER_SEC*(tv.tv_sec + 1e-6*tv.tv_usec), which is neither
   * seconds nor clocks and disagreed with omp_get_wtime()'s seconds,
   * inflating every reported time/mflops figure by ~1e6. */
#ifdef _OPENMP
  return omp_get_wtime();
#else
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + 1e-6*tv.tv_usec;
#endif
}
// Fill one real with a deterministic index-dependent value in [0,2].
void
set_R(QLA_Real *r, int i)
{
*r = 1+cos(i);
}
// Fill one complex with deterministic index-dependent real/imag parts.
void
set_C(QLA_Complex *c, int i)
{
QLA_c_eq_r_plus_ir(*c, 1+cos(i), 1+sin(i));
}
// Fill one color vector with deterministic site- and color-dependent values.
void
set_V(QLA_ColorVector *v, int i)
{
for(int j=0; j<QLA_Nc; j++) {
QLA_c_eq_r_plus_ir(QLA_elem_V(*v,j), j+1+cos(i), j+1+sin(i));
//QLA_real(QLA_elem_V(*v,j)) = 1;
//QLA_imag(QLA_elem_V(*v,j)) = 0;
}
}
// Fill one half fermion (Nc colors x Ns/2 spins) with deterministic values.
void
set_H(QLA_HalfFermion *h, int i)
{
for(int j=0; j<QLA_Nc; j++) {
for(int k=0; k<(QLA_Ns/2); k++) {
QLA_c_eq_r_plus_ir(QLA_elem_H(*h,j,k), (j+4)*(k+1)+cos(i), (j+4)*(k+1)+sin(i));
}
}
}
// Fill one Dirac fermion (Nc colors x Ns spins) with deterministic values.
void
set_D(QLA_DiracFermion *d, int i)
{
for(int j=0; j<QLA_Nc; j++) {
for(int k=0; k<QLA_Ns; k++) {
QLA_c_eq_r_plus_ir(QLA_elem_D(*d,j,k), (j+4)*(k+1)+cos(i), (j+4)*(k+1)+sin(i));
}
}
}
// Fill one color matrix; the modular expressions vary entries across
// (row, column) so all elements differ deterministically per site.
void
set_M(QLA_ColorMatrix *m, int i)
{
for(int j=0; j<QLA_Nc; j++) {
for(int k=0; k<QLA_Nc; k++) {
QLA_c_eq_r_plus_ir(QLA_elem_M(*m,j,k),
(((j-k+QLA_Nc+1)*(j+k+1))%19)+cos(i),
(((j+4)*(k+1))%17)+sin(i));
}
}
}
QLA_Real
sum_C(QLA_Complex *d, int n)
{
  /* Average of every real component of the n complex values (used as a
   * cheap result checksum, reinterpreting the array as reals). */
  QLA_Real *comp = (QLA_Real *)d;
  int total = n*sizeof(QLA_Complex)/sizeof(QLA_Real);
  QLA_Real acc = 0;
  int k = 0;
  while(k < total) acc += comp[k++];
  return acc/total;
}
QLA_Real
sum_V(QLA_ColorVector *d, int n)
{
  /* Average of every real component of n color vectors (result checksum). */
  QLA_Real *comp = (QLA_Real *)d;
  int total = n*sizeof(QLA_ColorVector)/sizeof(QLA_Real);
  QLA_Real acc = 0;
  int k = 0;
  while(k < total) acc += comp[k++];
  return acc/total;
}
QLA_Real
sum_H(QLA_HalfFermion *d, int n)
{
  /* Average of every real component of n half fermions (result checksum). */
  QLA_Real *comp = (QLA_Real *)d;
  int total = n*sizeof(QLA_HalfFermion)/sizeof(QLA_Real);
  QLA_Real acc = 0;
  int k = 0;
  while(k < total) acc += comp[k++];
  return acc/total;
}
QLA_Real
sum_D(QLA_DiracFermion *d, int n)
{
  /* Average of every real component of n Dirac fermions (result checksum). */
  QLA_Real *comp = (QLA_Real *)d;
  int total = n*sizeof(QLA_DiracFermion)/sizeof(QLA_Real);
  QLA_Real acc = 0;
  int k = 0;
  while(k < total) acc += comp[k++];
  return acc/total;
}
QLA_Real
sum_M(QLA_ColorMatrix *d, int n)
{
  /* Average of every real component of n color matrices (result checksum). */
  QLA_Real *comp = (QLA_Real *)d;
  int total = n*sizeof(QLA_ColorMatrix)/sizeof(QLA_Real);
  QLA_Real acc = 0;
  int k = 0;
  while(k < total) acc += comp[k++];
  return acc/total;
}
/* Statement macro: (re)initialize every benchmark array in parallel.
 * Expects n, r1, c1, v1, v2, d1, d2, m1, m2, m3, vp1, dp1, mp1 in scope.
 * vp1/dp1/mp1 are gather-pointer arrays aimed at sites ((i|16)+256) % n of
 * v2/d2/m3 — presumably a scattered access pattern to exercise indirect
 * loads; confirm the intended stride before changing it. */
#define set_fields { \
_Pragma("omp parallel for") \
for(int i=0; i<n; ++i) { \
set_R(&r1[i], i); \
set_C(&c1[i], i); \
set_V(&v1[i], i); \
set_V(&v2[i], i); \
set_D(&d1[i], i); \
set_D(&d2[i], i); \
set_M(&m1[i], i); \
set_M(&m2[i], i); \
set_M(&m3[i], i); \
int j = ((i|16)+256) % n; \
vp1[i] = &v2[j]; \
dp1[i] = &d2[j]; \
mp1[i] = &m3[j]; \
} \
}
// r[i] += a[i] * (*b[i]) for n sites: 3x3 complex matrix times gathered
// 3-component complex vector, accumulated into r.  OpenMP parallel over
// sites; XLC-specific alignment/disjointness hints when available.
void QLA_D3_V_vpeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector **b, int n)
{
// start_slice();
#ifdef HAVE_XLC
#pragma disjoint(*r,*a,**b)
__alignx(16,r);
__alignx(16,a);
#endif
#pragma omp parallel for
for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
__alignx(16,b[i]);
#endif
// Accumulate one output color component at a time in a local complex.
for(int i_c=0; i_c<3; i_c++) {
QLA_D_Complex x;
QLA_c_eq_c(x,QLA_D3_elem_V(r[i],i_c));
for(int k_c=0; k_c<3; k_c++) {
QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_V(*b[i],k_c));
}
QLA_c_eq_c(QLA_D3_elem_V(r[i],i_c),x);
}
}
// end_slice();
}
// Repeat the full n-site vpeq kernel (end - start) times between the
// profiling slice markers.  The loop index and the c parameter are unused
// inside the loop: only the repetition count matters.
void slice11(QLA_ColorVector *v1, QLA_ColorMatrix *m1,
QLA_ColorVector **vp1, int n, int c, int start, int end){
start_slice();
for(int i=start; i<end; ++i) {
QLA_V_vpeq_M_times_pV(v1, m1, vp1, n);
}
end_slice();
}
/* Benchmark QLA_V_vpeq_M_times_pV: v1 += m1 * gather(vp1) over n sites,
 * repeated c times (c chosen so roughly 9e9 bytes+flops are processed),
 * then print an average-value checksum, elapsed time and rates. */
void slice1(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
  QLA_Real sum;
  double cf = 9.e9/n;
  double flop, mem, time1;
  int c;
  set_fields;
  mem = 2*(3+NC)*NC*REALBYTES;      /* bytes touched per site per call */
  flop = 8*NC*NC;                   /* flops per site per call */
  c = 1 + cf/(flop+mem);
  time1 = dtime();
  /* BUG FIX: the old loop timed full 1000-iteration chunks for every
   * i < c (always over the range [0,1000)) and then re-ran [start,c)
   * on top, so more than c repetitions were executed while the mflops
   * arithmetic below assumes exactly c.  Run exactly c repetitions,
   * still in chunks of at most 1000. */
  for(int i=0; i<c; i+=1000){
    int end = (i+1000 < c) ? i+1000 : c;
    slice11(v1, m1, vp1, n, c, i, end);
  }
  time1 = dtime() - time1;
  sum = sum_V(v1, n);
  printf("%-32s:", "QLA_V_vpeq_M_times_pV");
  printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
// Benchmark QLA_V_veq_Ma_times_V: v1 = adjoint(m1) * v2 over n sites,
// repeated c times; prints checksum, time and memory/flop rates.
// The timing loop is left byte-identical on purpose.
void slice2(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
QLA_Real sum;
double cf = 9.e9/n;
double flop, mem, time1;
int nmin, nmax, c, nthreads=1; /* nmin/nmax/nthreads unused (template leftovers) */
set_fields;
mem = 2*(2+NC)*NC*REALBYTES;
flop = (8*NC-2)*NC;
c = 1 + cf/(flop+mem); /* repetitions sized for ~9e9 total work */
time1 = dtime();
for(int i=0; i<c; ++i) {
QLA_V_veq_Ma_times_V(v1, m1, v2, n);
}
time1 = dtime() - time1;
sum = sum_V(v1, n);
printf("%-32s:", "QLA_V_veq_Ma_times_V");
printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
// Benchmark QLA_V_vmeq_pV: v1 -= gather(vp1) over n sites, repeated c
// times; prints checksum, time and memory/flop rates.
void slice3(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
QLA_Real sum;
double cf = 9.e9/n;
double flop, mem, time1;
int nmin, nmax, c, nthreads=1; /* nmin/nmax/nthreads unused (template leftovers) */
set_fields;
mem = 6*NC*REALBYTES;
flop = 2*NC;
c = 1 + cf/(flop+mem); /* repetitions sized for ~9e9 total work */
time1 = dtime();
for(int i=0; i<c; ++i) {
QLA_V_vmeq_pV(v1, vp1, n);
}
time1 = dtime() - time1;
sum = sum_V(v1, n);
printf("%-32s:", "QLA_V_vmeq_pV");
printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
// Benchmark QLA_D_vpeq_spproj_M_times_pD (spin-projected matrix times
// gathered Dirac fermion, dir=0 sign=1), repeated c times.
void slice4(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
QLA_Real sum;
double cf = 9.e9/n;
double flop, mem, time1;
int nmin, nmax, c, nthreads=1; /* nmin/nmax/nthreads unused (template leftovers) */
set_fields;
mem = 2*(12+NC)*NC*REALBYTES;
flop = (16*NC+8)*NC;
c = 1 + cf/(flop+mem); /* repetitions sized for ~9e9 total work */
time1 = dtime();
for(int i=0; i<c; ++i) {
QLA_D_vpeq_spproj_M_times_pD(d1, m1, dp1,0,1,n);
}
time1 = dtime() - time1;
sum = sum_D(d1,n);
printf("%-32s:", "QLA_D_vpeq_spproj_M_times_pD");
printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
// Benchmark QLA_M_veq_M_times_pM: m1 = m2 * gather(mp1) over n sites,
// repeated c times.
void slice5(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
QLA_Real sum;
double cf = 9.e9/n;
double flop, mem, time1;
int nmin, nmax, c, nthreads=1; /* nmin/nmax/nthreads unused (template leftovers) */
set_fields;
mem = 6*NC*NC*REALBYTES;
flop = (8*NC-2)*NC*NC;
c = 1 + cf/(flop+mem); /* repetitions sized for ~9e9 total work */
time1 = dtime();
for(int i=0; i<c; ++i) {
QLA_M_veq_M_times_pM(m1, m2, mp1, n);
}
time1 = dtime() - time1;
sum = sum_M(m1, n);
printf("%-32s:", "QLA_M_veq_M_times_pM");
printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
// Benchmark QLA_r_veq_norm2_V: r1 = |v1|^2 reduction over n sites,
// repeated c times.
void slice6(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
QLA_Real sum;
double cf = 9.e9/n;
double flop, mem, time1;
int nmin, nmax, c, nthreads=1; /* nmin/nmax/nthreads unused (template leftovers) */
set_fields;
mem = 2*NC*REALBYTES;
flop = 4*NC;
c = 1 + cf/(flop+mem); /* repetitions sized for ~9e9 total work */
time1 = dtime();
for(int i=0; i<c; ++i) {
QLA_r_veq_norm2_V(r1, v1, n);
}
time1 = dtime() - time1;
sum = *r1;
printf("%-32s:", "QLA_r_veq_norm2_V");
printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
// Benchmark QLA_c_veq_V_dot_V: c1 = <v1, v2> reduction over n sites,
// repeated c times.
void slice7(
int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
QLA_Real sum;
double cf = 9.e9/n;
double flop, mem, time1;
int nmin, nmax, c, nthreads=1; /* nmin/nmax/nthreads unused (template leftovers) */
set_fields;
mem = 4*NC*REALBYTES;
flop = 8*NC;
c = 1 + cf/(flop+mem); /* repetitions sized for ~9e9 total work */
time1 = dtime();
for(int i=0; i<c; ++i) {
QLA_c_veq_V_dot_V(c1, v1, v2, n);
}
time1 = dtime() - time1;
sum = QLA_norm2_c(*c1);
printf("%-32s:", "QLA_c_veq_V_dot_V");
printf("%12g time=%5.2f mem=%5.0f mflops=%5.0f\n", sum, time1, mem*n*c/(1e6*time1), flop*n*c/(1e6*time1));
}
/* Run the enabled benchmark slices for field length n.
 * Fixes: removed unused locals (sum, nmin, nmax, c, flop, mem, time1, cf)
 * and report the real thread count instead of a hard-coded 1, so the
 * "len/thread" line is consistent with the OMP THREADS line in main(). */
void qla(int n, QLA_Real* r1, QLA_Complex* c1,
QLA_ColorVector *v1, QLA_ColorVector* v2,
QLA_ColorVector **vp1, QLA_DiracFermion *d1,
QLA_DiracFermion *d2, QLA_DiracFermion **dp1,
QLA_ColorMatrix *m1, QLA_ColorMatrix *m2,
QLA_ColorMatrix *m3, QLA_ColorMatrix **mp1){
  int nthreads = 1;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#endif
  printf("len = %i\n", n);
  printf("len/thread = %i\n", n/nthreads);
  slice1(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  /* remaining kernels currently disabled:
  slice2(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  slice3(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  slice4(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  slice5(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  slice6(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  slice7(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  */
}
int
main(int argc, char *argv[])
{
  /* Allocate all fields once at the largest size, then benchmark every
   * power-of-two length from nmin to nmax.  Allocations come from
   * aligned_malloc and are intentionally never freed (process exits).
   * Fixes: the size loop previously called qla(nmin, ...) so only the
   * smallest length was ever benchmarked; also removed the many unused
   * declarations (v3..v5, vp2..vp4, h1/h2/hp1, m4, sum, flop, mem,
   * time1, c). */
  QLA_Real *r1;
  QLA_Complex *c1;
  QLA_ColorVector *v1, *v2, **vp1;
  QLA_DiracFermion *d1, *d2, **dp1;
  QLA_ColorMatrix *m1, *m2, *m3, **mp1;
  int nmin, nmax, nthreads=1;
  printf("QLA_Precision = %c\n", QLA_Precision);
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
  printf("OMP THREADS = %i\n", nthreads);
  printf("omp_get_wtick = %g\n", omp_get_wtick());
#ifdef CPU_ZERO
  /* Pin each OpenMP thread to the core matching its thread id. */
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(tid, &set);
    sched_setaffinity(0, sizeof(set), &set);
  }
#endif
#endif
  nmin = 64*nthreads;
  nmax = 256*1024*nthreads;
  r1 = myalloc(QLA_Real, nmax);
  c1 = myalloc(QLA_Complex, nmax);
  v1 = myalloc(QLA_ColorVector, nmax);
  v2 = myalloc(QLA_ColorVector, nmax);
  vp1 = myalloc(QLA_ColorVector *, nmax);
  d1 = myalloc(QLA_DiracFermion, nmax);
  d2 = myalloc(QLA_DiracFermion, nmax);
  dp1 = myalloc(QLA_DiracFermion *, nmax);
  m1 = myalloc(QLA_ColorMatrix, nmax);
  m2 = myalloc(QLA_ColorMatrix, nmax);
  m3 = myalloc(QLA_ColorMatrix, nmax);
  mp1 = myalloc(QLA_ColorMatrix *, nmax);
  for(int n=nmin; n<=nmax; n*=2) {
    /* BUG FIX: was qla(nmin, ...), ignoring the loop variable. */
    qla(n, r1, c1, v1, v2, vp1, d1, d2, dp1, m1, m2, m3, mp1);
  }
  return 0;
}
|
pi-v16.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
// Approximates pi by midpoint integration of 4/(1+x^2) on [0,1].
// This version is DELIBERATELY incorrect (teaching example, see the
// WARNING below): inside the parallel region `sum` refers to each
// thread's private reduction copy, and the reduction is only combined
// when the region ends — so the `single` construct computes pi from one
// thread's partial sum.  With >1 thread (and the nowait races) the
// printed value is wrong.  The correct form computes pi after the region.
int main(int argc, char *argv[]) {
double x, sum=0.0, pi=0.0;
#if _DEBUG_
double start,end;
#endif
int i;
const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
if (argc < 2) {
fprintf(stderr, Usage);
exit(1);
}
// NOTE(review): atoi gives 0 on garbage input, making step infinite —
// consider strtol validation.
int num_steps = atoi(argv[1]);
double step = 1.0/(double) num_steps;
#if _DEBUG_
start= omp_get_wtime();
#else
Extrae_event (PROGRAM, PI_COMPUTATION);
#endif
/* do computation -- using just two threads */
// WARNING : incorrect code
#pragma omp parallel private(i,x) reduction(+:sum)
{
#if _DEBUG_
int id = omp_get_thread_num();
#endif
// First half of the range; nowait lets threads run ahead into the
// second loop without synchronizing.
#pragma omp for schedule(dynamic, num_steps/4) nowait
for (i=0; i < num_steps/2; i++) {
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
#if _DEBUG_
printf("thread id:%d it:%d\n",id,i);
#endif
}
#pragma omp for schedule(dynamic, num_steps/4) nowait
for (i=num_steps/2; i < num_steps; i++) {
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
#if _DEBUG_
printf("thread id:%d it:%d\n",id,i);
#endif
}
// BUG (intentional): reads one thread's private partial sum before
// the reduction has been combined.
#pragma omp single
pi = step * sum;
}
#if _DEBUG_
end = omp_get_wtime();
printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
Extrae_event (PROGRAM, END);
#endif
/* print results */
printf("Value of pi = %12.10f\n", pi);
return EXIT_SUCCESS;
}
|
GB_unop__identity_int32_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_int16)
// op(A') function: GB (_unop_tran__identity_int32_int16)
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int32_t) Ax: elementwise identity-with-cast apply, parallel over
// entries.  (Auto-generated file — comments only; do not alter the code.)
GrB_Info GB (_unop_apply__identity_int32_int16)
(
int32_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// Dense/sparse case: every entry of Ax is present.
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int32_t) A': transpose with typecast; the loop body lives in the
// shared template GB_unop_transpose.c, driven by the GB_* macros above.
// (Auto-generated file — comments only; do not alter the code.)
GrB_Info GB (_unop_tran__identity_int32_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
intruder.c | /* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
enum param_types {
PARAM_ATTACK = (unsigned char)'a',
PARAM_LENGTH = (unsigned char)'l',
PARAM_NUM = (unsigned char)'n',
PARAM_SEED = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
enum param_defaults {
PARAM_DEFAULT_ATTACK = 10,
PARAM_DEFAULT_LENGTH = 16,
PARAM_DEFAULT_NUM = 1 << 20,
PARAM_DEFAULT_SEED = 1,
PARAM_DEFAULT_THREAD = 1,
};
long global_params[256];
#if 0
= { /* 256 = ascii limit */
[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
[PARAM_NUM] = PARAM_DEFAULT_NUM,
[PARAM_SEED] = PARAM_DEFAULT_SEED,
[PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
#endif
void global_param_init()
{
    /* Reset every tunable parameter slot to its compiled-in default. */
    static const struct { unsigned char key; long value; } kDefaults[] = {
        { PARAM_ATTACK, PARAM_DEFAULT_ATTACK },
        { PARAM_LENGTH, PARAM_DEFAULT_LENGTH },
        { PARAM_NUM,    PARAM_DEFAULT_NUM    },
        { PARAM_SEED,   PARAM_DEFAULT_SEED   },
        { PARAM_THREAD, PARAM_DEFAULT_THREAD },
    };
    int i;
    for (i = 0; i < (int)(sizeof(kDefaults)/sizeof(kDefaults[0])); i++) {
        global_params[kDefaults[i].key] = kDefaults[i].value;
    }
}
typedef struct arg {
/* input: */
stream_t* streamPtr;
decoder_t* decoderPtr;
/* output: */
vector_t** errorVectors;
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
/* Print command-line usage (defaults shown per option) and exit(1). */
static void
displayUsage (const char* appName)
{
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" a <UINT> Percent [a]ttack (%i)\n", PARAM_DEFAULT_ATTACK);
printf(" l <UINT> Max data [l]ength (%i)\n", PARAM_DEFAULT_LENGTH);
printf(" n <UINT> [n]umber of flows (%i)\n", PARAM_DEFAULT_NUM);
printf(" s <UINT> Random [s]eed (%i)\n", PARAM_DEFAULT_SEED);
printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD);
exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
/* Parse -a/-l/-n/-s/-t integer options into global_params (keyed by the
 * option character).  Any unknown option or leftover positional argument
 * bumps opterr, which triggers the usage message (and exit) at the end. */
static void
parseArgs (long argc, char* const argv[])
{
    int opt;
    opterr = 0;
    while ((opt = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
        if (opt == 'a' || opt == 'l' || opt == 'n' ||
            opt == 's' || opt == 't')
        {
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            /* '?' (unknown/missing arg) or anything unexpected */
            opterr++;
        }
    }
    for (long j = optind; j < argc; j++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[j]);
        opterr++;
    }
    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* processPackets
* =============================================================================
*/
/* Per-thread worker: pull packets from the shared stream, feed them to the
 * shared decoder, and scan each fully reassembled flow with a private
 * detector; flow ids that trip the detector are recorded in this thread's
 * error vector.  Shared-state accesses are wrapped in TM_BEGIN/TM_END
 * transactions; detector and error vector are thread-private. */
void
processPackets (void* argPtr)
{
TM_THREAD_ENTER();
long threadId = thread_getId();
stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
detector_t* detectorPtr = PDETECTOR_ALLOC();
assert(detectorPtr);
PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
vector_t* errorVectorPtr = errorVectors[threadId];
while (1) {
char* bytes;
/* Transaction: grab the next packet from the shared stream. */
TM_BEGIN();
bytes = TMSTREAM_GETPACKET(streamPtr);
TM_END();
if (!bytes) {
break;
}
packet_t* packetPtr = (packet_t*)bytes;
long flowId = packetPtr->flowId;
int_error_t error;
/* Transaction: hand the fragment to the shared reassembly decoder. */
TM_BEGIN();
error = TMDECODER_PROCESS(decoderPtr,
bytes,
(PACKET_HEADER_LENGTH + packetPtr->length));
TM_END();
if (error) {
/*
* Currently, stream_generate() does not create these errors.
*/
assert(0);
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
assert(status);
}
char* data;
long decodedFlowId;
/* Transaction: fetch a fully reassembled flow, if one is ready. */
TM_BEGIN();
data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
TM_END();
if (data) {
/* Detector runs outside any transaction on private data. */
int_error_t error = PDETECTOR_PROCESS(detectorPtr, data);
P_FREE(data);
if (error) {
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr,
(void*)decodedFlowId);
assert(status);
}
}
}
PDETECTOR_FREE(detectorPtr);
TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
/* Benchmark driver: builds an attack-seeded packet stream, runs the
 * transactional intrusion-detection workers, then verifies every flagged
 * flow really is an attack and that all attacks were found. */
MAIN(argc, argv)
{
/*
* Initialization
*/
global_param_init();
parseArgs(argc, (char** const)argv);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
long percentAttack = global_params[PARAM_ATTACK];
long maxDataLength = global_params[PARAM_LENGTH];
long numFlow = global_params[PARAM_NUM];
long randomSeed = global_params[PARAM_SEED];
printf("Percent attack = %li\n", percentAttack);
printf("Max data length = %li\n", maxDataLength);
printf("Num flow = %li\n", numFlow);
printf("Random seed = %li\n", randomSeed);
dictionary_t* dictionaryPtr = dictionary_alloc();
assert(dictionaryPtr);
stream_t* streamPtr = stream_alloc(percentAttack);
assert(streamPtr);
long numAttack = stream_generate(streamPtr,
dictionaryPtr,
numFlow,
randomSeed,
maxDataLength);
printf("Num attack = %li\n", numAttack);
decoder_t* decoderPtr = decoder_alloc();
assert(decoderPtr);
/* One private error vector per worker thread. */
vector_t** errorVectors = (vector_t**)SEQ_MALLOC(numThread * sizeof(vector_t*));
assert(errorVectors);
long i;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = vector_alloc(numFlow);
assert(errorVectorPtr);
errorVectors[i] = errorVectorPtr;
}
arg_t arg;
arg.streamPtr = streamPtr;
arg.decoderPtr = decoderPtr;
arg.errorVectors = errorVectors;
/*
* Run transactions
*/
// NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
// wallclock time, we want to be sure we read time inside the
// simulator, or else we report native cycles spent on the benchmark
// instead of simulator cycles.
GOTO_SIM();
TIMER_T startTime;
TIMER_READ(startTime);
#ifdef OTM
#pragma omp parallel
{
processPackets((void*)&arg);
}
#else
thread_start(processPackets, (void*)&arg);
#endif
TIMER_T stopTime;
TIMER_READ(stopTime);
// NB: As above, timer reads must be done inside of the simulated region
// for PTLSim/ASF
GOTO_REAL();
printf("Elapsed time = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution
*/
/* Every flagged flow id must be a genuine attack, and the total flagged
* count must equal the number of attacks injected by stream_generate. */
long numFound = 0;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = errorVectors[i];
long e;
long numError = vector_getSize(errorVectorPtr);
numFound += numError;
for (e = 0; e < numError; e++) {
long flowId = (long)vector_at(errorVectorPtr, e);
bool_t status = stream_isAttack(streamPtr, flowId);
assert(status);
}
}
printf("Num found = %li\n", numFound);
assert(numFound == numAttack);
/*
* Clean up
*/
for (i = 0; i < numThread; i++) {
vector_free(errorVectors[i]);
}
SEQ_FREE(errorVectors);
decoder_free(decoderPtr);
stream_free(streamPtr);
dictionary_free(dictionaryPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
list *get_paths(char *filename)
{
    /* Read filename line by line into a new list (fgetl strips newlines);
     * aborts via file_error() if the file cannot be opened. */
    FILE *fp = fopen(filename, "r");
    if(!fp) file_error(filename);
    list *lines = make_list();
    char *line;
    while((line = fgetl(fp)) != 0){
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
char **get_random_paths(char **paths, int n, int m)
{
    /* Sample n paths uniformly at random (with replacement) from
     * paths[0..m-1].  The mutex serializes rand() so concurrent loaders
     * draw from a single stream.  Caller frees the returned array
     * (but not the strings, which are shared with paths). */
    char **picked = calloc(n, sizeof(char*));
    pthread_mutex_lock(&mutex);
    for(int i = 0; i < n; ++i){
        picked[i] = paths[rand() % m];
    }
    pthread_mutex_unlock(&mutex);
    return picked;
}
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    /* Return n freshly-allocated strings: each paths[i] with `find`
     * substituted by `replace`.  Caller owns the array and the strings. */
    char **out = calloc(n, sizeof(char*));
    for(int i = 0; i < n; ++i){
        char scratch[4096];
        find_replace(paths[i], find, replace, scratch);
        out[i] = copy_string(scratch);
    }
    return out;
}
/* Load n images as grayscale rows of a matrix: each image is loaded in
 * color at w x h, converted to gray, and its pixel buffer adopted as
 * X.vals[i].  X.cols is set from the last image (all are w*h*c). */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image(paths[i], w, h, 3);
image gray = grayscale_image(im);
free_image(im);
im = gray;
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_paths(char **paths, int n, int w, int h)
{
    /* Load n color images at w x h; each image's pixel buffer becomes one
     * row of the returned matrix (X.cols = h*w*c of the last image). */
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = calloc(X.rows, sizeof(float*));
    for(int i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Load n images with training-time augmentation: either a center crop
 * (center != 0) or a random scaled/rotated crop, then a random horizontal
 * flip and random HSV distortion.  Each augmented image's pixel buffer
 * becomes one matrix row. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
crop = random_augment_image(im, angle, aspect, min, max, size, size);
}
// 50% chance of horizontal flip.
int flip = rand()%2;
if (flip) flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
/*
show_image(im, "orig");
show_image(crop, "crop");
cvWaitKey(0);
*/
//grayscale_image_3c(crop);
free_image(im);
X.vals[i] = crop.data;
X.cols = crop.h*crop.w*crop.c;
}
return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/*
 * Shuffle the box array in place: each element is swapped with a
 * uniformly random index (one rand() call per element).
 */
void randomize_boxes(box_label *b, int n)
{
    int j;
    for(j = 0; j < n; ++j){
        box_label held = b[j];
        int other = rand()%n;
        b[j] = b[other];
        b[other] = held;
    }
}
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
if(boxes[i].x == 0 && boxes[i].y == 0) {
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
/*
 * Fill the SWAG-format truth buffer for one image: up to 90 boxes, each
 * written as [x, y, w, h, one-hot class] at stride (4+classes).  The label
 * file path is derived from the image path, boxes are shuffled and mapped
 * through the crop/flip transform before being written.
 */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    /* derive the .txt label path from the image path */
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    /* 90 matches the (4+classes)*90 truth size allocated in load_data_swag */
    for (i = 0; i < count && i < 90; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* NOTE(review): after correct_boxes clamps w/h to [0,1] this
         * strictly-negative test can never fire; presumably a degenerate-box
         * skip like the .001 thresholds elsewhere was intended — confirm. */
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}
/*
 * Fill the region-layer (YOLOv1/v2 style) truth grid for one image.
 * The image is divided into num_boxes x num_boxes cells; each box is
 * assigned to the cell containing its center.  Per cell the layout is
 * [objectness, one-hot class (classes), x, y, w, h] with x/y relative to
 * the cell.  First box to claim a cell wins; later boxes for the same
 * cell are dropped.
 */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    /* derive the .txt label path from the image path */
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* skip degenerate boxes produced by the crop */
        if (w < .005 || h < .005) continue;
        /* grid cell containing the box center */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        /* x,y become offsets within the cell */
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;   /* cell already claimed by an earlier box */
        truth[index++] = 1;           /* objectness */
        if (id < classes) truth[index+id] = 1;
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/*
 * Decode a run-length-encoded binary mask into im.data.
 * Runs alternate between 0 and 1 starting with 0; any pixels past the
 * last run are filled with the next alternating value.
 */
void load_rle(image im, int *rle, int n)
{
    int pos = 0;
    int value = 0;
    int r, k;
    for(r = 0; r < n; ++r){
        for(k = 0; k < rle[r]; ++k){
            im.data[pos++] = value;
        }
        value = 1 - value;   /* runs alternate 0,1,0,1,... */
    }
    /* pad the remainder of the image with the current value */
    while(pos < im.h*im.w*im.c){
        im.data[pos] = value;
        ++pos;
    }
}
/*
 * OR the single-channel mask `src` into channel `c` of `dest`:
 * every nonzero source pixel sets the corresponding dest pixel to 1.
 */
void or_image(image src, image dest, int c)
{
    int p;
    float *channel = dest.data + dest.w*dest.h*c;
    for(p = 0; p < src.w*src.h; ++p){
        if(src.data[p]) channel[p] = 1;
    }
}
/*
 * Make per-pixel channel labels mutually exclusive: wherever an earlier
 * channel is set, clear that pixel in every later channel.
 */
void exclusive_image(image src)
{
    int area = src.w*src.h;
    int ch, p, later;
    for(ch = 0; ch < src.c-1; ++ch){
        for(p = 0; p < area; ++p){
            if(!src.data[ch*area + p]) continue;
            for(later = ch+1; later < src.c; ++later){
                src.data[later*area + p] = 0;
            }
        }
    }
}
/*
 * Compute the tight bounding box (in pixels) of the nonzero pixels in the
 * first channel of `im`.  If the image is empty, w and h come out <= 0,
 * which callers test for via b.w > 0.
 */
box bound_image(image im)
{
    int col, row;
    int x0 = im.w, y0 = im.h;   /* running min corner */
    int x1 = 0, y1 = 0;         /* running max corner */
    for(row = 0; row < im.h; ++row){
        for(col = 0; col < im.w; ++col){
            if(!im.data[row*im.w + col]) continue;
            if(col < x0) x0 = col;
            if(row < y0) y0 = row;
            if(col > x1) x1 = col;
            if(row > y1) y1 = row;
        }
    }
    box b = {x0, y0, x1 - x0 + 1, y1 - y0 + 1};
    return b;
}
/*
 * Fill instance-segmentation truth from the RLE mask file paired with
 * `path`.  For each of up to num_boxes instances the truth layout is
 * [class id, mw*mh downsampled mask values].  The mask is decoded at full
 * resolution, put through the same rotate/crop/flip augmentation as the
 * input image (aug), then resized to mw x mh.  A -1 id terminates the
 * list when fewer than num_boxes instances were read.
 */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    /* derive the mask .txt path from the image path */
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];   /* one RLE line: "<id> <comma-separated run lengths>" */
    int id;
    int i = 0;
    int j;
    image part = make_image(w, h, 1);   /* scratch buffer reused per instance */
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        /* apply the same geometric augmentation as the input image */
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        image mask = resize_image(sized, mw, mh);
        truth[i*(mw*mh+1)] = id;
        for(j = 0; j < mw*mh; ++j){
            truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
        }
        ++i;
        free_image(mask);
        free_image(sized);
        free(rle);
    }
    /* sentinel so the consumer knows where the instance list ends */
    if(i < num_boxes) truth[i*(mw*mh+1)] = -1;
    fclose(file);
    free_image(part);
}
/*
 * Fill mask-RCNN-style truth from the RLE mask file paired with `path`.
 * Per instance the layout is [x, y, w, h (normalized bbox of the mask),
 * mw*mh mask values cropped to that bbox, class id] — stride
 * (4 + mw*mh + 1).  Instances whose augmented mask ends up empty
 * (b.w <= 0) are dropped.
 */
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    /* derive the mask .txt path from the image path */
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];   /* one RLE line: "<id> <comma-separated run lengths>" */
    int id;
    int i = 0;
    image part = make_image(w, h, 1);   /* scratch buffer reused per instance */
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        /* apply the same geometric augmentation as the input image */
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        box b = bound_image(sized);
        if(b.w > 0){
            /* crop the mask to its bounding box, then resize to mw x mh */
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}
/*
 * Fill flat detection truth for one image: up to num_boxes entries of
 * [x, y, w, h, id] (stride 5), compacted so skipped degenerate boxes do
 * not leave gaps.  Boxes are shuffled and remapped through the crop/flip
 * transform before writing.
 */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    /* derive the .txt label path from the image path */
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;   /* truth buffer holds num_boxes entries */
    float x,y,w,h;
    int id;
    int i;
    int sub = 0;   /* number of boxes skipped so far, to keep output compact */
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* skip boxes the crop made degenerate */
        if ((w < .001 || h < .001)) {
            ++sub;
            continue;
        }
        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}
#define NUMCHARS 37
/*
 * Print the argmax character of each of n NUMCHARS-wide prediction
 * slices, followed by a newline.
 */
void print_letters(float *pred, int n)
{
    int pos;
    for(pos = 0; pos < n; ++pos){
        int best = max_index(pred + pos*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/*
 * Build the truth vector for a captcha image from its filename.
 * The basename characters (up to the first '.') are one-hot encoded at
 * NUMCHARS stride; any of the n slots left over are set to the
 * terminator class (NUMCHARS-1).
 * NOTE(review): assumes the path contains a '/' (strrchr result is not
 * checked) and that alphanum_to_int returns a value in [0, NUMCHARS-1]
 * even for the "Bad" characters it warns about — confirm.
 */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;   /* skip past the final '/' to the basename */
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    /* pad remaining slots with the terminator class */
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
/*
 * Load a captcha batch: n images (optionally sampled from a pool of m
 * paths) with per-character one-hot labels of k characters each.
 */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    int j;
    d.shallow = 0;
    d.X = load_image_paths(chosen, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    for(j = 0; j < n; ++j){
        fill_truth_captcha(chosen[j], k, d.y.vals[j]);
    }
    if(m) free(chosen);
    return d;
}
/*
 * Load captcha images for autoencoder training: the target y aliases the
 * input X (same row pointers), so the network learns to reproduce its input.
 * NOTE(review): X.cols is overridden to the hard-coded 17100 — presumably
 * the flattened pixel count for this dataset's fixed image size; confirm
 * against the network config.
 * NOTE(review): because d.y shares d.X.vals, a deep free_data of both
 * matrices would double-free; callers appear to rely on this aliasing.
 */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}
/*
 * Set a one-hot truth vector by substring-matching class labels against an
 * image path: truth[i] becomes 1 for every labels[i] found in path, all
 * other entries are zeroed first.  Warns unless exactly one label matched
 * (a single-class setup with zero matches is also accepted silently).
 */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int matches = 0;
    int j;
    memset(truth, 0, k*sizeof(float));
    for(j = 0; j < k; ++j){
        if(strstr(path, labels[j])){
            truth[j] = 1;
            ++matches;
        }
    }
    if(matches != 1 && (k != 1 || matches != 0)) printf("Too many or too few labels: %d, %s\n", matches, path);
}
/*
 * Expand a one-hot truth vector over a class hierarchy (softmax tree).
 * First pass: set every ancestor of each active class to 1.  Second pass:
 * for each sibling group with no active member, mark all its entries with
 * SECRET_NUM so the loss can ignore that group.
 */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    /* propagate each positive label up to the root */
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;   /* running offset of the current group within truth */
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;   /* stays 1 if no class in this group is active */
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            /* no label in this group: mark it so the loss skips it */
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/*
 * Load k regression targets per image from the .txt file paired with each
 * image path (one float per target).
 *
 * Fixes: the original dereferenced the fopen result without a NULL check
 * (crash on a missing label file) and ignored fscanf's return value,
 * filling rows from whatever fscanf left behind on malformed files.
 */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        /* derive the .txt label path from the image path */
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".BMP", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        find_replace(labelpath, ".JPG", ".txt", labelpath);
        find_replace(labelpath, ".JPeG", ".txt", labelpath);
        find_replace(labelpath, ".Jpeg", ".txt", labelpath);
        find_replace(labelpath, ".PNG", ".txt", labelpath);
        find_replace(labelpath, ".TIF", ".txt", labelpath);
        find_replace(labelpath, ".bmp", ".txt", labelpath);
        find_replace(labelpath, ".jpeg", ".txt", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);
        find_replace(labelpath, ".tif", ".txt", labelpath);
        FILE *file = fopen(labelpath, "r");
        if(!file) file_error(labelpath);   /* fail loudly like the other loaders */
        for(j = 0; j < k; ++j){
            /* stop on short/malformed files; remaining targets keep the
             * matrix's initial values instead of indeterminate data */
            if(fscanf(file, "%f", &(y.vals[i][j])) != 1) break;
        }
        fclose(file);
    }
    return y;
}
/*
 * Build one-hot label rows for n paths by substring matching against
 * `labels`, optionally expanding them over a class hierarchy.
 * With labels == NULL the matrix is returned zeroed.
 */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int row;
    if(labels){
        for(row = 0; row < n; ++row){
            fill_truth(paths[row], labels, k, y.vals[row]);
            if(hierarchy) fill_hierarchy(y.vals[row], k, hierarchy);
        }
    }
    return y;
}
/*
 * Build multi-label tag rows: for each image path, read integer tag ids
 * from its paired labels .txt file and set those columns to 1.  Missing
 * label files leave the row zeroed; tags >= k are ignored.
 */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int row;
    for(row = 0; row < n; ++row){
        char tagpath[4096];
        find_replace(paths[row], "images", "labels", tagpath);
        find_replace(tagpath, ".jpg", ".txt", tagpath);
        FILE *fp = fopen(tagpath, "r");
        if (!fp) continue;   /* no label file: leave the row all zeros */
        int tag;
        while(fscanf(fp, "%d", &tag) == 1){
            if(tag < k) y.vals[row][tag] = 1;
        }
        fclose(fp);
    }
    return y;
}
/*
 * Read class names from a file, one per line.  Returns a heap array of
 * strings; the list wrapper is freed but the strings live on.
 */
char **get_labels(char *filename)
{
    list *lines = get_paths(filename);
    char **names = (char **)list_to_array(lines);
    free_list(lines);
    return names;
}
/*
 * Release a data batch.  Shallow batches only own their row-pointer
 * arrays (the rows are shared); deep batches own the rows too.
 */
void free_data(data d)
{
    if(d.shallow){
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/*
 * Build a w x h x classes segmentation mask for `path` by decoding each
 * RLE instance line from the paired mask .txt file and OR-ing it into the
 * channel of its class id.  Caller frees the returned image.
 */
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    /* derive the mask .txt path from the image path */
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];   /* one RLE line: "<id> <comma-separated run lengths>" */
    int id;
    image part = make_image(w, h, 1);   /* scratch buffer reused per instance */
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);   /* merge this instance into its class channel */
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}
/*
 * Like get_segmentation_image, but with an extra trailing "background"
 * channel (index classes): it starts all 1 and is cleared wherever any
 * instance covers a pixel.  Caller frees the returned image.
 */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    /* derive the mask .txt path from the image path */
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    /* background channel starts fully on */
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];   /* one RLE line: "<id> <comma-separated run lengths>" */
    int id;
    image part = make_image(w, h, 1);   /* scratch buffer reused per instance */
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);   /* merge this instance into its class channel */
        /* any covered pixel is no longer background */
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}
/*
 * Load a semantic-segmentation batch of n images sampled from m paths.
 * Each input is randomly rotated/cropped/flipped/color-distorted; the
 * paired class mask receives the SAME geometric transform, downscaled by
 * `div` (y rows are h/div * w/div * classes).  X and y rows take ownership
 * of the augmented pixel buffers.
 */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;   /* mask is downsampled by div in each dimension */
    d.y.vals = calloc(d.X.rows, sizeof(float*));
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        /* pick one random augmentation, then apply it to image AND mask */
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        /* same transform as the input, scaled down by div */
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;
        free_image(orig);
        free_image(mask);
        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/*
 * Load an instance-segmentation batch: n augmented images with per-
 * instance truth of [id, (w/div)*(h/div) mask values] for up to `boxes`
 * instances each (see fill_truth_iseg).
 */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    /* per image: `boxes` slots of (downsampled mask + id) */
    d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        /* pick one random augmentation, reused for the truth masks */
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");
        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);
        free_image(orig);
        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/*
 * Load a mask-prediction batch: n augmented images with per-instance
 * truth of (coords+1) floats for up to `boxes` instances each, filled by
 * fill_truth_mask with a fixed 14x14 downsampled mask.
 */
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y = make_matrix(n, (coords+1)*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        /* pick one random augmentation, reused for the truth masks */
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");
        /* 14x14 is the fixed mask resolution expected by the model head */
        fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
        free_image(orig);
        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/*
 * Load a region-layer detection batch: n images randomly jitter-cropped,
 * resized to w x h, flipped and color-distorted, with grid truth of
 * size*size cells x (5+classes) filled by fill_truth_region using the
 * same crop transform.
 */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        /* random crop: each edge moves by up to jitter * dimension */
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);
        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        /* scale and offset of the crop in normalized coordinates,
           used to remap the ground-truth boxes */
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;
        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/*
 * Load a comparison batch: n pairs of images concatenated channel-wise
 * (6 channels per row).  Per class the truth holds the best IoU seen for
 * each image of the pair, then pairs are discretized to {1,0}, {0,1}, or
 * SECRET_NUM/SECRET_NUM when neither side clearly wins.
 *
 * Fixes: the original dereferenced fp1/fp2 from fopen without NULL
 * checks, crashing on a missing label file; it now fails loudly via
 * file_error like the other loaders, and each file is closed as soon as
 * it has been consumed.
 */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;   /* two 3-channel images stacked per row */
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        /* first image of the pair: keep max IoU per class in even slots */
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        fclose(fp1);
        /* second image of the pair: odd slots */
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        fclose(fp2);
        /* discretize: clear winner gets 1/0, otherwise mark as ignore */
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/*
 * Load a single-image SWAG batch: one randomly chosen path, jitter-
 * cropped, resized back to its own dimensions and optionally flipped,
 * with truth of (4+classes)*90 filled by fill_truth_swag using the same
 * crop transform.
 */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = rand()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    /* 90 box slots of [x,y,w,h, one-hot class] each */
    int k = (4+classes)*90;
    d.y = make_matrix(1, k);
    /* random crop: each edge moves by up to jitter * dimension */
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);
    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;
    /* scale and offset of the crop in normalized coordinates */
    float sx = (float)swidth / w;
    float sy = (float)sheight / h;
    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/*
 * Load a YOLO detection batch: n images placed into a gray (0.5) w x h
 * canvas with random aspect-ratio jitter and random placement, then
 * color-distorted and optionally flipped.  Truth is 5*boxes floats per
 * image ([x,y,w,h,id] entries) remapped through the same placement.
 */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);   /* gray letterbox background */
        /* jitter the aspect ratio, keep the larger side at full scale */
        float dw = jitter * orig.w;
        float dh = jitter * orig.h;
        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        //float scale = rand_uniform(.25, 2);
        float scale = 1;
        float nw, nh;
        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }
        /* random placement of the resized image on the canvas */
        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);
        place_image(orig, nw, nh, dx, dy, sized);
        random_distort_image(sized, hue, saturation, exposure);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;
        /* remap truth with the same offset/scale used for placement */
        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
        free_image(orig);
    }
    free(random_paths);
    return d;
}
/*
 * Worker entry point: dispatch one load_args request to the matching
 * loader based on a.type, writing the result through a.d (or a.im /
 * a.resized for the single-image types).  Takes ownership of ptr and
 * frees it before returning.
 */
void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    /* zero means "unset": fall back to neutral augmentation factors */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;
    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == REGRESSION_DATA){
        *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == ISEG_DATA){
        *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == INSTANCE_DATA){
        *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SEGMENTATION_DATA){
        *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        /* single image: load plus a plain resize */
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    } else if (a.type == LETTERBOX_DATA){
        /* single image: load plus an aspect-preserving letterbox resize */
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);   /* this thread owns the args copy */
    return 0;
}
/*
 * Launch a single loader thread for one load_args request.  The args are
 * copied to the heap; load_thread frees the copy when it finishes.
 */
pthread_t load_data_in_thread(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    pthread_t thread;
    *copy = args;
    if(pthread_create(&thread, 0, load_thread, copy)) error("Thread creation failed");
    return thread;
}
/*
 * Fan-out loader: split a request for args.n examples across args.threads
 * worker threads, join them, and concatenate their partial batches into
 * *args.d as one deep-owned batch.  Takes ownership of ptr.
 */
void *load_threads(void *ptr)
{
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);   /* args copied; release the heap copy */
    data *buffers = calloc(args.threads, sizeof(data));
    pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* partition `total` as evenly as possible across threads */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;   /* the concatenated batch owns the rows now */
    /* free only the per-thread row-pointer arrays, not the shared rows */
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/*
 * Synchronous variant of load_data_in_thread: run the loader in the
 * calling thread.  load_thread frees the heap copy of the args.
 */
void load_data_blocking(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    load_thread(copy);
}
/*
 * Launch the fan-out loader (load_threads) asynchronously.  The args are
 * copied to the heap; load_threads frees the copy.
 */
pthread_t load_data(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    pthread_t thread;
    *copy = args;
    if(pthread_create(&thread, 0, load_threads, copy)) error("Thread creation failed");
    return thread;
}
/*
 * Load an image-to-image batch for "writing" training: inputs are the
 * images at w x h, targets are the matching "-label.png" files loaded as
 * grayscale at out_w x out_h.
 */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    char **label_paths = find_replace_paths(chosen, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(chosen, n, w, h);
    d.y = load_image_paths_gray(label_paths, n, out_w, out_h);
    if(m) free(chosen);
    int j;
    for(j = 0; j < n; ++j) free(label_paths[j]);
    free(label_paths);
    return d;
}
/*
 * Load a plain classification batch with no augmentation: images resized
 * to w x h plus one-hot labels matched by substring.
 */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(chosen, n, w, h);
    d.y = load_labels_paths(chosen, n, labels, k, 0);
    if(m) free(chosen);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/*
 * Load a super-resolution batch: the target y is a random (w*scale) x
 * (h*scale) crop of each image, and the input X is that crop downscaled
 * to w x h.  Both row arrays take ownership of their pixel buffers.
 */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    int i;
    d.X.rows = n;
    d.X.vals = calloc(n, sizeof(float*));
    d.X.cols = w*h*3;
    d.y.rows = n;
    d.y.vals = calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;   /* high-resolution target */
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        /* input is the downscaled version of the same crop */
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
/*
 * Load a regression batch: augmented images paired with k float targets
 * read from each image's label file.
 */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(chosen, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(chosen, n, k);
    if(m) free(chosen);
    return d;
}
/*
 * Build a shallow batch whose row r is taken from batch orig[inds[r]]
 * (row r of that batch).  Row pointers are shared, not copied, so the
 * result must be freed with shallow semantics.
 */
data select_data(data *orig, int *inds)
{
    data out = {0};
    out.shallow = 1;
    out.w = orig[0].w;
    out.h = orig[0].h;
    out.X.rows = orig[0].X.rows;
    out.y.rows = orig[0].X.rows;
    out.X.cols = orig[0].X.cols;
    out.y.cols = orig[0].y.cols;
    out.X.vals = calloc(orig[0].X.rows, sizeof(float *));
    out.y.vals = calloc(orig[0].y.rows, sizeof(float *));
    int r;
    for(r = 0; r < out.X.rows; ++r){
        out.X.vals[r] = orig[inds[r]].X.vals[r];
        out.y.vals[r] = orig[inds[r]].y.vals[r];
    }
    return out;
}
/*
 * Split each image in `orig` into a divs x divs grid of overlapping
 * tiles, each scaled to size times the cell dimensions.  Returns an array
 * of divs*divs batches (caller frees); tile i covers grid cell
 * (i%divs, i/divs) and every batch gets its own copy of the labels.
 */
data *tile_data(data orig, int divs, int size)
{
    data *ds = calloc(divs*divs, sizeof(data));
    int i, j;
    /* one tile position per outer iteration; each builds an independent batch */
    #pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        data d;
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = calloc(d.X.rows, sizeof(float*));
        d.y = copy_matrix(orig.y);
        /* NOTE(review): nested parallel region; only effective if nested
         * OpenMP parallelism is enabled at runtime — confirm intent */
        #pragma omp parallel for
        for(j = 0; j < orig.X.rows; ++j){
            /* tile origin, shifted so the enlarged tile stays centered on its cell */
            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}
/*
 * Produce a deep copy of `orig` with every image row resized to w x h
 * (labels are copied unchanged).  Rows are resized in parallel.
 */
data resize_data(data orig, int w, int h)
{
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.y = copy_matrix(orig.y);
    #pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        /* wrap the flat row as an image header (no copy), then resize */
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}
/*
 * Load an augmented classification batch: randomly augmented images plus
 * one-hot labels, optionally expanded over a class hierarchy.
 */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(chosen, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(chosen, n, labels, k, hierarchy);
    if(m) free(chosen);
    return d;
}
/*
 * Load a multi-label tagging batch: randomly augmented images plus k-wide
 * tag vectors read from each image's label file.
 */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(chosen, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(chosen, n, k);
    if(m) free(chosen);
    return d;
}
/*
 * Stack two matrices vertically by sharing row pointers (shallow concat).
 * The column count is taken from m1; rows of m2 are assumed compatible.
 */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    int j, pos = 0;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = calloc(m1.rows + m2.rows, sizeof(float*));
    for(j = 0; j < m1.rows; ++j){
        out.vals[pos++] = m1.vals[j];
    }
    for(j = 0; j < m2.rows; ++j){
        out.vals[pos++] = m2.vals[j];
    }
    return out;
}
/*
 * Shallow-concatenate two batches: X and y row pointers are shared with
 * the inputs, so the result must be freed with shallow semantics.
 */
data concat_data(data d1, data d2)
{
    data merged = {0};
    merged.shallow = 1;
    merged.X = concat_matrix(d1.X, d2.X);
    merged.y = concat_matrix(d1.y, d2.y);
    merged.w = d1.w;
    merged.h = d1.h;
    return merged;
}
/*
 * Concatenate n batches into one shallow batch, releasing each
 * intermediate row-pointer array as the accumulator grows.
 */
data concat_datas(data *d, int n)
{
    data accum = {0};
    int j;
    for(j = 0; j < n; ++j){
        data merged = concat_data(d[j], accum);
        free_data(accum);   /* accum is shallow: only its pointer arrays go */
        accum = merged;
    }
    return accum;
}
/*
 * Load a CSV dataset where column `target` holds the class: that column
 * is popped out of X and one-hot encoded into a k-wide y matrix.
 */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *column = pop_column(&X, target);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = one_hot_encode(column, X.rows, k);
    d.X = X;
    d.y = y;
    free(column);   /* the raw class column is no longer needed */
    return d;
}
/*
 * Load one CIFAR-10 binary batch file (10000 records of 1 label byte +
 * 3072 pixel bytes) into X (pixels scaled to [0,1]) and one-hot y.
 *
 * Fix: the original ignored fread's return value, silently filling rows
 * from uninitialized stack bytes on a truncated file; it now aborts.
 */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];   /* 1 label byte + 32*32*3 pixel bytes */
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);   /* bytes -> [0,1] */
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/*
 * Copy n uniformly random rows of d into the flat X and y buffers
 * (sampling with replacement, one rand() call per row).
 */
void get_random_batch(data d, int n, float *X, float *y)
{
    int b;
    for(b = 0; b < n; ++b){
        int row = rand()%d.X.rows;
        memcpy(X + b*d.X.cols, d.X.vals[row], d.X.cols*sizeof(float));
        memcpy(y + b*d.y.cols, d.y.vals[row], d.y.cols*sizeof(float));
    }
}
/* Copy n consecutive rows of d, starting at `offset`, into X (and y if
 * y is non-NULL).  No bounds check: caller must ensure offset+n fits. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int row;
    for(row = 0; row < n; ++row){
        int src = offset + row;
        memcpy(X + row*d.X.cols, d.X.vals[src], d.X.cols*sizeof(float));
        if(y) memcpy(y + row*d.y.cols, d.y.vals[src], d.y.cols*sizeof(float));
    }
}
/* Label smoothing, in place: blend each target row with the uniform
 * distribution over classes using epsilon = 0.1. */
void smooth_data(data d)
{
    int r, c;
    float uniform = 1. / d.y.cols;
    float eps = .1;
    for(r = 0; r < d.y.rows; ++r){
        for(c = 0; c < d.y.cols; ++c){
            d.y.vals[r][c] = eps * uniform + (1-eps) * d.y.vals[r][c];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records) from the
 * hard-coded data/cifar/ path, scale pixels to [0,1] and apply label
 * smoothing.  Record layout: 1 label byte + 3072 pixel bytes.
 * Fix: the original ignored fread's return value, so a truncated batch
 * file silently filled rows from uninitialized stack bytes. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            /* bail out on a short read instead of using garbage data */
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
            int class = bytes[0];
            y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
/* Load Go training positions: the file alternates a "row col" move line
 * with a 361-character board line ('1' = own stone, '2' = opponent).
 * Matrices start sized for ~3.36M positions and grow by doubling, then
 * shrink to the actual count.
 * Fix: the second fgetl (board line) was not checked for NULL, so a
 * truncated file (move line with no board line) dereferenced NULL. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        if(!board){
            /* truncated file: a move line without its board line */
            free(label);
            break;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/* Shuffle the rows of d.X and d.y together, in place (Fisher-Yates).
 * Fix: the original drew `rand()%i`, which never allows element i to
 * stay in place (Sattolo's variant, producing only cyclic permutations
 * and hence a biased shuffle).  A uniform shuffle draws from [0, i]. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = rand()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every feature row of d.X by s, in place. */
void scale_data_rows(data d, float s)
{
    int r;
    for(r = 0; r < d.X.rows; ++r) scale_array(d.X.vals[r], d.X.cols, s);
}
/* Add the constant s to every element of every feature row, in place. */
void translate_data_rows(data d, float s)
{
    int r;
    for(r = 0; r < d.X.rows; ++r) translate_array(d.X.vals[r], d.X.cols, s);
}
/* Deep-copy the X and y matrices of d.  The boxes pointer is copied as
 * is, i.e. still shared with the source — NOTE(review): looks
 * intentional, but confirm ownership before freeing both datasets. */
data copy_data(data d)
{
    data dup = {0};
    dup.shallow = 0;
    dup.w = d.w;
    dup.h = d.h;
    dup.num_boxes = d.num_boxes;
    dup.boxes = d.boxes;
    dup.X = copy_matrix(d.X);
    dup.y = copy_matrix(d.y);
    return dup;
}
/* Normalize each feature row of d.X in place via normalize_array. */
void normalize_data_rows(data d)
{
    int r;
    for(r = 0; r < d.X.rows; ++r) normalize_array(d.X.vals[r], d.X.cols);
}
/* Return a shallow view of partition `part` out of `total` equal-ish
 * slices of d.  Integer arithmetic guarantees the slices tile the rows
 * exactly.  The view aliases d's row pointers (shallow = 1). */
data get_data_part(data d, int part, int total)
{
    data slice = {0};
    slice.shallow = 1;
    int x0 = d.X.rows * part / total;
    int x1 = d.X.rows * (part + 1) / total;
    int y0 = d.y.rows * part / total;
    int y1 = d.y.rows * (part + 1) / total;
    slice.X.rows = x1 - x0;
    slice.y.rows = y1 - y0;
    slice.X.cols = d.X.cols;
    slice.y.cols = d.y.cols;
    slice.X.vals = d.X.vals + x0;
    slice.y.vals = d.y.vals + y0;
    return slice;
}
/* Sample num rows of d uniformly at random, with replacement, into a
 * shallow dataset: only the pointer arrays are allocated, the row
 * buffers alias d's. */
data get_random_data(data d, int num)
{
    data sample = {0};
    sample.shallow = 1;
    sample.X.rows = num;
    sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = calloc(num, sizeof(float *));
    sample.y.vals = calloc(num, sizeof(float *));
    int i;
    for(i = 0; i < num; ++i){
        int pick = rand()%d.X.rows;
        sample.X.vals[i] = d.X.vals[pick];
        sample.y.vals[i] = d.y.vals[pick];
    }
    return sample;
}
/* Split d into a train/test pair: rows [start, end) — partition `part`
 * of `total` — become the test set, everything else the train set.
 * Returns a malloc'd array of two shallow datasets: split[0] = train,
 * split[1] = test.  Caller frees the array and the shallow datasets.
 * NOTE(review): the calloc results are not checked — matches the rest
 * of this file's policy of assuming allocation succeeds. */
data *split_data(data d, int part, int total)
{
data *split = calloc(2, sizeof(data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
data train;
data test;
/* shallow: both views alias d's row pointers, only the pointer arrays are new */
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
train.X.rows = train.y.rows = d.X.rows - (end-start);
train.X.cols = test.X.cols = d.X.cols;
train.y.cols = test.y.cols = d.y.cols;
train.X.vals = calloc(train.X.rows, sizeof(float*));
test.X.vals = calloc(test.X.rows, sizeof(float*));
train.y.vals = calloc(train.y.rows, sizeof(float*));
test.y.vals = calloc(test.y.rows, sizeof(float*));
/* rows before the test window go to train, at their original indices */
for(i = 0; i < start; ++i){
train.X.vals[i] = d.X.vals[i];
train.y.vals[i] = d.y.vals[i];
}
/* the [start, end) window becomes the test set */
for(i = start; i < end; ++i){
test.X.vals[i-start] = d.X.vals[i];
test.y.vals[i-start] = d.y.vals[i];
}
/* rows after the window continue the train set, shifted left by the window size */
for(i = end; i < d.X.rows; ++i){
train.X.vals[i-(end-start)] = d.X.vals[i];
train.y.vals[i-(end-start)] = d.y.vals[i];
}
split[0] = train;
split[1] = test;
return split;
}
|
locks_example.c | //===-- locks_example.c - Example for lock usage ------------------*- C -*-===//
//
// Part of the LOMP Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <omp.h>
/* Demonstrate OpenMP lock usage: every thread prints its three lines as
 * an uninterrupted group because the prints are guarded by one lock. */
int main(void) {
  omp_lock_t print_lock;
  double dbl = 42.0;
  float flt = 21.42;
  int num = 21;

  omp_init_lock(&print_lock);

  printf("Before parallel region\n");
  printf("=======================================\n");
#pragma omp parallel shared(num, dbl, flt, print_lock)
  {
    /* serialize the three printf calls so output lines never interleave */
    omp_set_lock(&print_lock);
    printf("Hello World: ");
    printf("my secret is %lf ", dbl + flt);
    printf("and %d\n", num);
    omp_unset_lock(&print_lock);
  }
  printf("=======================================\n");
  printf("After parallel region\n");

  omp_destroy_lock(&print_lock);
  return 0;
}
|
box3d2r.c | #define BENCH_DIM 3
#define BENCH_FPP 249
#define BENCH_RAD 2
#include "common.h"
/* Run `timestep` iterations of a 3D radius-2 (5x5x5, 125-point) box
 * stencil over a double-buffered array A (time dimension size 2).
 * A1 holds 2 * dimsize^3 elements where dimsize = compsize + 4; the
 * outer BENCH_RAD cells are a read-only halo.  Returns elapsed seconds.
 * The stencil body is deliberately duplicated: the `scop` branch is the
 * region consumed by polyhedral tools (#pragma scop), the `else` branch
 * is the plain OpenMP baseline.  The two bodies must stay identical. */
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
/* scop branch: region delimited for polyhedral source-to-source tools */
if (scop) {
#pragma scop
for (int t = 0; t < timestep; t++)
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-0.324f*A[t%2][i-2][j][k] +
0.0020f*A[t%2][i-2][j-2][k-2] +
0.0030f*A[t%2][i-2][j-2][k-1] +
0.0040f*A[t%2][i-2][j-2][k] +
0.0050f*A[t%2][i-2][j-2][k+1] +
0.0060f*A[t%2][i-2][j-2][k+2] +
0.0070f*A[t%2][i-2][j-1][k-2] +
0.0080f*A[t%2][i-2][j-1][k-1] +
0.0090f*A[t%2][i-2][j-1][k] +
0.0100f*A[t%2][i-2][j-1][k+1] +
0.0110f*A[t%2][i-2][j-1][k+2] +
0.0120f*A[t%2][i-2][j][k-2] +
0.0130f*A[t%2][i-2][j][k-1] +
0.0140f*A[t%2][i-2][j][k+1] +
0.0150f*A[t%2][i-2][j][k+2] +
0.0160f*A[t%2][i-2][j+1][k-2] +
0.0170f*A[t%2][i-2][j+1][k-1] +
0.0180f*A[t%2][i-2][j+1][k] +
0.0190f*A[t%2][i-2][j+1][k+1] +
0.0200f*A[t%2][i-2][j+1][k+2] +
0.0210f*A[t%2][i-2][j+2][k-2] +
0.0220f*A[t%2][i-2][j+2][k-1] +
0.0230f*A[t%2][i-2][j+2][k] +
0.0240f*A[t%2][i-2][j+2][k+1] +
0.0250f*A[t%2][i-2][j+2][k+2] -
0.3264f*A[t%2][i-1][j][k] +
0.0021f*A[t%2][i-1][j-2][k-2] +
0.0031f*A[t%2][i-1][j-2][k-1] +
0.0041f*A[t%2][i-1][j-2][k] +
0.0051f*A[t%2][i-1][j-2][k+1] +
0.0061f*A[t%2][i-1][j-2][k+2] +
0.0071f*A[t%2][i-1][j-1][k-2] +
0.0081f*A[t%2][i-1][j-1][k-1] +
0.0091f*A[t%2][i-1][j-1][k] +
0.0101f*A[t%2][i-1][j-1][k+1] +
0.0111f*A[t%2][i-1][j-1][k+2] +
0.0121f*A[t%2][i-1][j][k-2] +
0.0131f*A[t%2][i-1][j][k-1] +
0.0141f*A[t%2][i-1][j][k+1] +
0.0151f*A[t%2][i-1][j][k+2] +
0.0161f*A[t%2][i-1][j+1][k-2] +
0.0171f*A[t%2][i-1][j+1][k-1] +
0.0181f*A[t%2][i-1][j+1][k] +
0.0191f*A[t%2][i-1][j+1][k+1] +
0.0201f*A[t%2][i-1][j+1][k+2] +
0.0211f*A[t%2][i-1][j+2][k-2] +
0.0221f*A[t%2][i-1][j+2][k-1] +
0.0231f*A[t%2][i-1][j+2][k] +
0.0241f*A[t%2][i-1][j+2][k+1] +
0.0251f*A[t%2][i-1][j+2][k+2] +
0.6712f*A[t%2][i][j][k] +
0.0022f*A[t%2][i][j-2][k-2] +
0.0032f*A[t%2][i][j-2][k-1] +
0.0042f*A[t%2][i][j-2][k] +
0.0052f*A[t%2][i][j-2][k+1] +
0.0062f*A[t%2][i][j-2][k+2] +
0.0072f*A[t%2][i][j-1][k-2] +
0.0082f*A[t%2][i][j-1][k-1] +
0.0092f*A[t%2][i][j-1][k] +
0.0102f*A[t%2][i][j-1][k+1] +
0.0112f*A[t%2][i][j-1][k+2] +
0.0122f*A[t%2][i][j][k-2] +
0.0132f*A[t%2][i][j][k-1] +
0.0142f*A[t%2][i][j][k+1] +
0.0152f*A[t%2][i][j][k+2] +
0.0162f*A[t%2][i][j+1][k-2] +
0.0172f*A[t%2][i][j+1][k-1] +
0.0182f*A[t%2][i][j+1][k] +
0.0192f*A[t%2][i][j+1][k+1] +
0.0202f*A[t%2][i][j+1][k+2] +
0.0212f*A[t%2][i][j+2][k-2] +
0.0222f*A[t%2][i][j+2][k-1] +
0.0232f*A[t%2][i][j+2][k] +
0.0242f*A[t%2][i][j+2][k+1] +
0.0252f*A[t%2][i][j+2][k+2] -
0.3312f*A[t%2][i+1][j][k] +
0.0023f*A[t%2][i+1][j-2][k-2] +
0.0033f*A[t%2][i+1][j-2][k-1] +
0.0043f*A[t%2][i+1][j-2][k] +
0.0053f*A[t%2][i+1][j-2][k+1] +
0.0063f*A[t%2][i+1][j-2][k+2] +
0.0073f*A[t%2][i+1][j-1][k-2] +
0.0083f*A[t%2][i+1][j-1][k-1] +
0.0093f*A[t%2][i+1][j-1][k] +
0.0103f*A[t%2][i+1][j-1][k+1] +
0.0113f*A[t%2][i+1][j-1][k+2] +
0.0123f*A[t%2][i+1][j][k-2] +
0.0133f*A[t%2][i+1][j][k-1] +
0.0143f*A[t%2][i+1][j][k+1] +
0.0153f*A[t%2][i+1][j][k+2] +
0.0163f*A[t%2][i+1][j+1][k-2] +
0.0173f*A[t%2][i+1][j+1][k-1] +
0.0183f*A[t%2][i+1][j+1][k] +
0.0193f*A[t%2][i+1][j+1][k+1] +
0.0203f*A[t%2][i+1][j+1][k+2] +
0.0213f*A[t%2][i+1][j+2][k-2] +
0.0223f*A[t%2][i+1][j+2][k-1] +
0.0233f*A[t%2][i+1][j+2][k] +
0.0243f*A[t%2][i+1][j+2][k+1] +
0.0253f*A[t%2][i+1][j+2][k+2] -
0.3336f*A[t%2][i+2][j][k] +
0.0024f*A[t%2][i+2][j-2][k-2] +
0.0034f*A[t%2][i+2][j-2][k-1] +
0.0044f*A[t%2][i+2][j-2][k] +
0.0054f*A[t%2][i+2][j-2][k+1] +
0.0064f*A[t%2][i+2][j-2][k+2] +
0.0074f*A[t%2][i+2][j-1][k-2] +
0.0084f*A[t%2][i+2][j-1][k-1] +
0.0094f*A[t%2][i+2][j-1][k] +
0.0104f*A[t%2][i+2][j-1][k+1] +
0.0114f*A[t%2][i+2][j-1][k+2] +
0.0124f*A[t%2][i+2][j][k-2] +
0.0134f*A[t%2][i+2][j][k-1] +
0.0144f*A[t%2][i+2][j][k+1] +
0.0154f*A[t%2][i+2][j][k+2] +
0.0164f*A[t%2][i+2][j+1][k-2] +
0.0174f*A[t%2][i+2][j+1][k-1] +
0.0184f*A[t%2][i+2][j+1][k] +
0.0194f*A[t%2][i+2][j+1][k+1] +
0.0204f*A[t%2][i+2][j+1][k+2] +
0.0214f*A[t%2][i+2][j+2][k-2] +
0.0224f*A[t%2][i+2][j+2][k-1] +
0.0234f*A[t%2][i+2][j+2][k] +
0.0244f*A[t%2][i+2][j+2][k+1] +
0.0254f*A[t%2][i+2][j+2][k+2];
#pragma endscop
}
/* baseline branch: identical stencil, parallelized over i per time step */
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-0.324f*A[t%2][i-2][j][k] +
0.0020f*A[t%2][i-2][j-2][k-2] +
0.0030f*A[t%2][i-2][j-2][k-1] +
0.0040f*A[t%2][i-2][j-2][k] +
0.0050f*A[t%2][i-2][j-2][k+1] +
0.0060f*A[t%2][i-2][j-2][k+2] +
0.0070f*A[t%2][i-2][j-1][k-2] +
0.0080f*A[t%2][i-2][j-1][k-1] +
0.0090f*A[t%2][i-2][j-1][k] +
0.0100f*A[t%2][i-2][j-1][k+1] +
0.0110f*A[t%2][i-2][j-1][k+2] +
0.0120f*A[t%2][i-2][j][k-2] +
0.0130f*A[t%2][i-2][j][k-1] +
0.0140f*A[t%2][i-2][j][k+1] +
0.0150f*A[t%2][i-2][j][k+2] +
0.0160f*A[t%2][i-2][j+1][k-2] +
0.0170f*A[t%2][i-2][j+1][k-1] +
0.0180f*A[t%2][i-2][j+1][k] +
0.0190f*A[t%2][i-2][j+1][k+1] +
0.0200f*A[t%2][i-2][j+1][k+2] +
0.0210f*A[t%2][i-2][j+2][k-2] +
0.0220f*A[t%2][i-2][j+2][k-1] +
0.0230f*A[t%2][i-2][j+2][k] +
0.0240f*A[t%2][i-2][j+2][k+1] +
0.0250f*A[t%2][i-2][j+2][k+2] -
0.3264f*A[t%2][i-1][j][k] +
0.0021f*A[t%2][i-1][j-2][k-2] +
0.0031f*A[t%2][i-1][j-2][k-1] +
0.0041f*A[t%2][i-1][j-2][k] +
0.0051f*A[t%2][i-1][j-2][k+1] +
0.0061f*A[t%2][i-1][j-2][k+2] +
0.0071f*A[t%2][i-1][j-1][k-2] +
0.0081f*A[t%2][i-1][j-1][k-1] +
0.0091f*A[t%2][i-1][j-1][k] +
0.0101f*A[t%2][i-1][j-1][k+1] +
0.0111f*A[t%2][i-1][j-1][k+2] +
0.0121f*A[t%2][i-1][j][k-2] +
0.0131f*A[t%2][i-1][j][k-1] +
0.0141f*A[t%2][i-1][j][k+1] +
0.0151f*A[t%2][i-1][j][k+2] +
0.0161f*A[t%2][i-1][j+1][k-2] +
0.0171f*A[t%2][i-1][j+1][k-1] +
0.0181f*A[t%2][i-1][j+1][k] +
0.0191f*A[t%2][i-1][j+1][k+1] +
0.0201f*A[t%2][i-1][j+1][k+2] +
0.0211f*A[t%2][i-1][j+2][k-2] +
0.0221f*A[t%2][i-1][j+2][k-1] +
0.0231f*A[t%2][i-1][j+2][k] +
0.0241f*A[t%2][i-1][j+2][k+1] +
0.0251f*A[t%2][i-1][j+2][k+2] +
0.6712f*A[t%2][i][j][k] +
0.0022f*A[t%2][i][j-2][k-2] +
0.0032f*A[t%2][i][j-2][k-1] +
0.0042f*A[t%2][i][j-2][k] +
0.0052f*A[t%2][i][j-2][k+1] +
0.0062f*A[t%2][i][j-2][k+2] +
0.0072f*A[t%2][i][j-1][k-2] +
0.0082f*A[t%2][i][j-1][k-1] +
0.0092f*A[t%2][i][j-1][k] +
0.0102f*A[t%2][i][j-1][k+1] +
0.0112f*A[t%2][i][j-1][k+2] +
0.0122f*A[t%2][i][j][k-2] +
0.0132f*A[t%2][i][j][k-1] +
0.0142f*A[t%2][i][j][k+1] +
0.0152f*A[t%2][i][j][k+2] +
0.0162f*A[t%2][i][j+1][k-2] +
0.0172f*A[t%2][i][j+1][k-1] +
0.0182f*A[t%2][i][j+1][k] +
0.0192f*A[t%2][i][j+1][k+1] +
0.0202f*A[t%2][i][j+1][k+2] +
0.0212f*A[t%2][i][j+2][k-2] +
0.0222f*A[t%2][i][j+2][k-1] +
0.0232f*A[t%2][i][j+2][k] +
0.0242f*A[t%2][i][j+2][k+1] +
0.0252f*A[t%2][i][j+2][k+2] -
0.3312f*A[t%2][i+1][j][k] +
0.0023f*A[t%2][i+1][j-2][k-2] +
0.0033f*A[t%2][i+1][j-2][k-1] +
0.0043f*A[t%2][i+1][j-2][k] +
0.0053f*A[t%2][i+1][j-2][k+1] +
0.0063f*A[t%2][i+1][j-2][k+2] +
0.0073f*A[t%2][i+1][j-1][k-2] +
0.0083f*A[t%2][i+1][j-1][k-1] +
0.0093f*A[t%2][i+1][j-1][k] +
0.0103f*A[t%2][i+1][j-1][k+1] +
0.0113f*A[t%2][i+1][j-1][k+2] +
0.0123f*A[t%2][i+1][j][k-2] +
0.0133f*A[t%2][i+1][j][k-1] +
0.0143f*A[t%2][i+1][j][k+1] +
0.0153f*A[t%2][i+1][j][k+2] +
0.0163f*A[t%2][i+1][j+1][k-2] +
0.0173f*A[t%2][i+1][j+1][k-1] +
0.0183f*A[t%2][i+1][j+1][k] +
0.0193f*A[t%2][i+1][j+1][k+1] +
0.0203f*A[t%2][i+1][j+1][k+2] +
0.0213f*A[t%2][i+1][j+2][k-2] +
0.0223f*A[t%2][i+1][j+2][k-1] +
0.0233f*A[t%2][i+1][j+2][k] +
0.0243f*A[t%2][i+1][j+2][k+1] +
0.0253f*A[t%2][i+1][j+2][k+2] -
0.3336f*A[t%2][i+2][j][k] +
0.0024f*A[t%2][i+2][j-2][k-2] +
0.0034f*A[t%2][i+2][j-2][k-1] +
0.0044f*A[t%2][i+2][j-2][k] +
0.0054f*A[t%2][i+2][j-2][k+1] +
0.0064f*A[t%2][i+2][j-2][k+2] +
0.0074f*A[t%2][i+2][j-1][k-2] +
0.0084f*A[t%2][i+2][j-1][k-1] +
0.0094f*A[t%2][i+2][j-1][k] +
0.0104f*A[t%2][i+2][j-1][k+1] +
0.0114f*A[t%2][i+2][j-1][k+2] +
0.0124f*A[t%2][i+2][j][k-2] +
0.0134f*A[t%2][i+2][j][k-1] +
0.0144f*A[t%2][i+2][j][k+1] +
0.0154f*A[t%2][i+2][j][k+2] +
0.0164f*A[t%2][i+2][j+1][k-2] +
0.0174f*A[t%2][i+2][j+1][k-1] +
0.0184f*A[t%2][i+2][j+1][k] +
0.0194f*A[t%2][i+2][j+1][k+1] +
0.0204f*A[t%2][i+2][j+1][k+2] +
0.0214f*A[t%2][i+2][j+2][k-2] +
0.0224f*A[t%2][i+2][j+2][k-1] +
0.0234f*A[t%2][i+2][j+2][k] +
0.0244f*A[t%2][i+2][j+2][k+1] +
0.0254f*A[t%2][i+2][j+2][k+2];
}
/* end_time is never set in this chunk; falls back to sb_time() now */
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
DepositCircuit.h | #ifndef _DEPOSITCIRCUIT_H_
#define _DEPOSITCIRCUIT_H_
#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../ThirdParty/BigIntHeader.hpp"
#include "ethsnarks.hpp"
#include "utils.hpp"
#include "gadgets/sha256_many.hpp"
using namespace ethsnarks;
namespace Loopring
{
// Circuit gadget proving a single onchain deposit: credits `amount` of
// `tokenID` to `accountID`, (re)sets the account's public key, and
// rolls the Merkle account tree from `root` to getNewAccountsRoot().
// Constructor wires the gadgets; witness/constraint generation follow.
class DepositGadget : public GadgetT
{
public:
const Constants& constants;
// User state
BalanceGadget balanceBefore;
AccountGadget accountBefore;
// Inputs
DualVariableGadget accountID;
DualVariableGadget tokenID;
DualVariableGadget amount;
DualVariableGadget publicKeyX;
DualVariableGadget publicKeyY;
// Calculate the new balance
UnsafeAddGadget uncappedBalanceAfter;
MinGadget balanceAfter;
// Update User
UpdateBalanceGadget updateBalance;
UpdateAccountGadget updateAccount;
// NOTE: member order above matters — the initializer list below wires
// gadgets in declaration order, each consuming outputs of earlier ones.
DepositGadget(
ProtoboardT& pb,
const Constants& _constants,
const VariableT& root,
const std::string& prefix
) :
GadgetT(pb, prefix),
constants(_constants),
// User state
balanceBefore(pb, FMT(prefix, ".balanceBefore")),
accountBefore(pb, FMT(prefix, ".accountBefore")),
// Inputs
accountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID")),
tokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".tokenID")),
amount(pb, NUM_BITS_AMOUNT, FMT(prefix, ".amount")),
publicKeyX(pb, 256, FMT(prefix, ".publicKeyX")),
publicKeyY(pb, 256, FMT(prefix, ".publicKeyY")),
// Calculate the new balance
// We can't let the deposit fail (it's onchain so it needs to be included),
// and we do want to cap the balance to NUM_BITS_AMOUNT bits max, so cap the balance even
// if it means that the user loses some tokens (NUM_BITS_AMOUNT bits should be more than enough).
uncappedBalanceAfter(pb, balanceBefore.balance, amount.packed, FMT(prefix, ".uncappedBalanceAfter")),
balanceAfter(pb, uncappedBalanceAfter.result(), constants.maxAmount, NUM_BITS_AMOUNT + 1, FMT(prefix, ".balanceAfter")),
// Update User
updateBalance(pb, accountBefore.balancesRoot, tokenID.bits,
{balanceBefore.balance, balanceBefore.tradingHistory},
{balanceAfter.result(), balanceBefore.tradingHistory},
FMT(prefix, ".updateBalance")),
updateAccount(pb, root, accountID.bits,
{accountBefore.publicKey.x, accountBefore.publicKey.y, accountBefore.nonce, accountBefore.balancesRoot},
{publicKeyX.packed, publicKeyY.packed, accountBefore.nonce, updateBalance.result()},
FMT(prefix, ".updateAccount"))
{
}
// Fill in the witness values for one concrete Deposit, in the same
// order the gadgets were wired above.
void generate_r1cs_witness(const Deposit& deposit)
{
// User state
balanceBefore.generate_r1cs_witness(deposit.balanceUpdate.before);
accountBefore.generate_r1cs_witness(deposit.accountUpdate.before);
// Inputs
accountID.generate_r1cs_witness(pb, deposit.accountUpdate.accountID);
tokenID.generate_r1cs_witness(pb, deposit.balanceUpdate.tokenID);
amount.generate_r1cs_witness(pb, deposit.amount);
publicKeyX.generate_r1cs_witness(pb, deposit.accountUpdate.after.publicKey.x);
publicKeyY.generate_r1cs_witness(pb, deposit.accountUpdate.after.publicKey.y);
// Calculate the new balance
uncappedBalanceAfter.generate_r1cs_witness();
balanceAfter.generate_r1cs_witness();
// Update User
updateBalance.generate_r1cs_witness(deposit.balanceUpdate.proof);
updateAccount.generate_r1cs_witness(deposit.accountUpdate.proof);
}
// Add the R1CS constraints of every sub-gadget.  `true` asks the dual
// variable gadgets to also constrain bits <-> packed consistency.
void generate_r1cs_constraints()
{
// Inputs
accountID.generate_r1cs_constraints(true);
tokenID.generate_r1cs_constraints(true);
amount.generate_r1cs_constraints(true);
publicKeyX.generate_r1cs_constraints(true);
publicKeyY.generate_r1cs_constraints(true);
// Calculate the new balance
uncappedBalanceAfter.generate_r1cs_constraints();
balanceAfter.generate_r1cs_constraints();
// Update User
updateBalance.generate_r1cs_constraints();
updateAccount.generate_r1cs_constraints();
}
// Bit arrays hashed into the onchain deposit-block hash; the 6 zero
// bits pad tokenID — presumably to a fixed field width, TODO confirm.
const std::vector<VariableArrayT> getOnchainData() const
{
return {accountID.bits,
publicKeyX.bits, publicKeyY.bits,
VariableArrayT(6, constants.zero), tokenID.bits,
amount.bits};
}
// Root of the account Merkle tree after this deposit is applied.
const VariableT& getNewAccountsRoot() const
{
return updateAccount.result();
}
};
// Circuit for a whole deposit block: chains `blockSize` DepositGadgets
// (each starting from the previous one's accounts root) and a running
// SHA-256 chain over their onchain data, then exposes the public data.
class DepositCircuit : public Circuit
{
public:
PublicDataGadget publicData;
Constants constants;
// Inputs
DualVariableGadget exchangeID;
DualVariableGadget merkleRootBefore;
DualVariableGadget merkleRootAfter;
DualVariableGadget depositBlockHashStart;
DualVariableGadget startIndex;
DualVariableGadget count;
// Deposits
unsigned int numDeposits;
std::vector<DepositGadget> deposits;
std::vector<sha256_many> hashers;
DepositCircuit(ProtoboardT& pb, const std::string& prefix) :
Circuit(pb, prefix),
publicData(pb, FMT(prefix, ".publicData")),
constants(pb, FMT(prefix, ".constants")),
// Inputs
exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
depositBlockHashStart(pb, 256, FMT(prefix, ".depositBlockHashStart")),
startIndex(pb, 32, FMT(prefix, ".startIndex")),
count(pb, 32, FMT(prefix, ".count"))
{
}
// Build all constraints for a block of `blockSize` deposits.
// NOTE(review): assumes blockSize >= 1 — deposits.back()/hashers.back()
// below would be UB on an empty block; confirm callers guarantee this.
void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
{
this->numDeposits = blockSize;
constants.generate_r1cs_constraints();
// Inputs
exchangeID.generate_r1cs_constraints(true);
merkleRootBefore.generate_r1cs_constraints(true);
merkleRootAfter.generate_r1cs_constraints(true);
depositBlockHashStart.generate_r1cs_constraints(true);
startIndex.generate_r1cs_constraints(true);
count.generate_r1cs_constraints(true);
// Deposits
// reserve() also keeps references into `deposits` stable while we
// emplace; each deposit starts from the previous accounts root.
deposits.reserve(numDeposits);
hashers.reserve(numDeposits);
for (size_t j = 0; j < numDeposits; j++)
{
VariableT depositAccountsRoot = (j == 0) ? merkleRootBefore.packed : deposits.back().getNewAccountsRoot();
deposits.emplace_back(
pb,
constants,
depositAccountsRoot,
std::string("deposit_") + std::to_string(j)
);
deposits.back().generate_r1cs_constraints();
// Hash data from deposit
// Each hasher chains: previous block hash (or the start hash) ++
// this deposit's onchain data.
std::vector<VariableArrayT> depositData = deposits.back().getOnchainData();
std::vector<VariableArrayT> hashBits;
hashBits.push_back(reverse((j == 0) ? depositBlockHashStart.bits : hashers.back().result().bits));
hashBits.insert(hashBits.end(), depositData.begin(), depositData.end());
hashers.emplace_back(pb, flattenReverse(hashBits), std::string("hash_") + std::to_string(j));
hashers.back().generate_r1cs_constraints();
}
// Public data
publicData.add(exchangeID.bits);
publicData.add(merkleRootBefore.bits);
publicData.add(merkleRootAfter.bits);
publicData.add(reverse(depositBlockHashStart.bits));
publicData.add(reverse(hashers.back().result().bits));
publicData.add(startIndex.bits);
publicData.add(count.bits);
publicData.generate_r1cs_constraints();
// Check the new merkle root
requireEqual(pb, deposits.back().getNewAccountsRoot(), merkleRootAfter.packed, "newMerkleRoot");
}
// Fill in the witness for a concrete DepositBlock.
bool generateWitness(const DepositBlock& block)
{
constants.generate_r1cs_witness();
// Inputs
exchangeID.generate_r1cs_witness(pb, block.exchangeID);
merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
depositBlockHashStart.generate_r1cs_witness(pb, block.startHash);
startIndex.generate_r1cs_witness(pb, block.startIndex);
count.generate_r1cs_witness(pb, block.count);
// printBits("start hash input: 0x", depositBlockHashStart.get_bits(pb), true);
// Deposits
assert(deposits.size() == hashers.size());
// NOTE(review): the parallel loop writes witness values of distinct
// gadgets into the shared protoboard — presumably disjoint variable
// slots make this safe; verify before enabling MULTICORE.
#ifdef MULTICORE
#pragma omp parallel for
#endif
for(unsigned int i = 0; i < block.deposits.size(); i++)
{
deposits[i].generate_r1cs_witness(block.deposits[i]);
}
// Cannot be done in parallel
// (each hasher consumes the previous hasher's output bits)
for(unsigned int i = 0; i < block.deposits.size(); i++)
{
hashers[i].generate_r1cs_witness();
}
// printBits("DepositBlockHash: 0x", hashers.back().result().bits.get_bits(pb));
// Public data
publicData.generate_r1cs_witness();
return true;
}
bool generateWitness(const json& input) override
{
return generateWitness(input.get<Loopring::DepositBlock>());
}
BlockType getBlockType() override
{
return BlockType::Deposit;
}
// Valid only after generateConstraints() has set numDeposits.
unsigned int getBlockSize() override
{
return numDeposits;
}
void printInfo() override
{
std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numDeposits) << "/deposit)" << std::endl;
}
};
}
#endif
|
GB_binop__isgt_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_fp32)
// A*D function (colscale): GB (_AxD__isgt_fp32)
// D*A function (rowscale): GB (_DxB__isgt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_fp32)
// C=scalar+B GB (_bind1st__isgt_fp32)
// C=scalar+B' GB (_bind1st_tran__isgt_fp32)
// C=A+scalar GB (_bind2nd__isgt_fp32)
// C=A'+scalar GB (_bind2nd_tran__isgt_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: removed a stray trailing line-continuation after the 0, which
// spliced the following comment line into the macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fix: same stray trailing line-continuation removed here)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_FP32 || GxB_NO_ISGT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): the ISGT operator is not one of the accumulable
// ops listed below, so no dense ewise3-accum kernel is generated.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from
// the shared template with GB_BINOP defined as cij = (aij > bij).
void GB (_Cdense_ewise3_noaccum__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, sliced into
// B_ntasks tasks over B_nthreads threads.  Returns GrB_NO_VALUE when
// this operator/type combination is disabled at compile time.
GrB_Info GB (_Cdense_accumB__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Returns
// GrB_NO_VALUE when this operator/type combination is disabled.
GrB_Info GB (_Cdense_accumb__isgt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block already returned above;
// harmless generated-code artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing
// float results into C->x via the shared colscale template.
GrB_Info GB (_AxD__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing float
// results into C->x via the shared rowscale template.
GrB_Info GB (_DxB__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B with optional mask M (structural and/or
// complemented).  For eWiseUnion, alpha/beta scalars substitute for
// entries missing in A or B respectively.  The heavy lifting lives in
// GB_add_template.c; the workspaces declared here are freed by the
// template's GB_FREE_WORKSPACE before returning.
GrB_Info GB (_AaddB__isgt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
// only eWiseUnion reads the alpha/beta inputs; they stay
// uninitialized (and unused) for plain eWiseAdd
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is
// sparse/hypersparse; delegates to the shared meta template.
GrB_Info GB (_AemultB_08__isgt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B
// is bitmap/full.  GB_BINOP_FLIP is 0 for this operator, so only the
// non-flipped template branch below is compiled in.
GrB_Info GB (_AemultB_02__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and
// both A and B are bitmap/full; delegates to the shared template.
GrB_Info GB (_AemultB_04__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap, with optional (complemented) mask;
// delegates to the shared bitmap template.
GrB_Info GB (_AemultB_bitmap__isgt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B's bitmap: apply the
// ISGT_FP32 operator with the scalar bound as the first argument.
GrB_Info GB (_bind1st__isgt_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    const float x = (*((float *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only touch positions present in the bitmap
        if (GBB (Bb, p))
        {
            float b_entry = GBX (Bx, p, false) ;
            Cx [p] = (x > b_entry) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A's bitmap: apply the
// ISGT_FP32 operator with the scalar bound as the second argument.
GrB_Info GB (_bind2nd__isgt_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    const float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only touch positions present in the bitmap
        if (GBB (Ab, p))
        {
            float a_entry = GBX (Ax, p, false) ;
            Cx [p] = (a_entry > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Per-entry kernel used by GB_unop_transpose.c below:
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (x > aij) ;                       \
}

// C = op (x, A'): transpose A and apply z = (x > aij) with the scalar
// bound as the first argument.  The loop body is GB_unop_transpose.c,
// driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind1st_tran__isgt_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (generated boilerplate)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Per-entry kernel used by GB_unop_transpose.c below:
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (aij > y) ;                       \
}

// C = op (A', y): transpose A and apply z = (aij > y) with the scalar
// bound as the second argument.
GrB_Info GB (_bind2nd_tran__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB058-jacobikernel-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two parallel for loops within one single parallel region,
combined with private() and reduction().
*/
#include <stdio.h>
#include <math.h>
#define MSIZE 200
int n=MSIZE, m=MSIZE, mits=1000;
double tol=0.0000000001, relax = 1.0, alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;
/* Fill u with zeros and build the RHS array f for the Jacobi solver.
 * NOTE(review): xx and yy are truncated to int, so for MSIZE=200 they are
 * almost always 0 or -1 rather than continuous coordinates in (-1,1); this
 * is how the DataRaceBench kernel is written -- confirm before "fixing".
 * The nested "parallel for" pragma opens a nested parallel region (a
 * single-thread team unless nested parallelism is enabled); also part of
 * the benchmark's construction and left untouched. */
void
initialize ()
{
  int i, j, xx, yy;

  dx = 2.0 / (n - 1);   /* grid spacing in x */
  dy = 2.0 / (m - 1);   /* grid spacing in y */

  /* Initialize initial condition and RHS */
#pragma omp parallel for private(i,j,xx,yy)
  for (i = 0; i < n; i++)
#pragma omp parallel for private(j,xx,yy)
    for (j = 0; j < m; j++)
      {
        xx = (int) (-1.0 + dx * (i - 1));       /* -1 < x < 1 */
        yy = (int) (-1.0 + dy * (j - 1));       /* -1 < y < 1 */
        u[i][j] = 0.0;
        f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
          - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
      }
}
/* Jacobi relaxation: u := uold - omega * resid with
 * resid = (A*uold - f)/b, swept over the interior of the grid.
 * Runs exactly mits sweeps; `error` reports the residual norm of the
 * last sweep.  NOTE(review): the while condition tests only k <= mits --
 * tol never terminates the loop early; matches the DataRaceBench original. */
void
jacobi ()
{
  double omega;
  int i, j, k;
  double error, resid, ax, ay, b;

  omega = relax;
  /* Initialize coefficients */
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);

  ax = 1.0 / (dx * dx);                         /* X-direction coef */
  ay = 1.0 / (dy * dy);                         /* Y-direction coef */
  b = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;   /* Central coeff */

  error = 10.0 * tol;
  k = 1;

  while (k <= mits)
    {
      error = 0.0;

      /* Copy new solution into old */
#pragma omp parallel for private(j)
      for (i = 0; i < n; i++)
#pragma omp parallel for
        for (j = 0; j < m; j++)
          uold[i][j] = u[i][j];

      /* Sweep interior points; error accumulates the squared residual
         through the reduction clause.  The inner pragma again opens a
         nested parallel region (benchmark construction, left as-is). */
#pragma omp parallel for private(j,resid) reduction(+:error)
      for (i = 1; i < (n - 1); i++)
#pragma omp parallel for private(resid) reduction(+:error)
        for (j = 1; j < (m - 1); j++)
          {
            resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                     + ay * (uold[i][j - 1] + uold[i][j + 1]) +
                     b * uold[i][j] - f[i][j]) / b;
            u[i][j] = uold[i][j] - omega * resid;
            error = error + resid * resid;
          }
      /* Error check */
      k = k + 1;
      error = sqrt (error) / (n * m);
    }                           /* End iteration loop */

  printf ("Total Number of Iterations:%d\n", k);
  printf ("Residual:%E\n", error);
}
/* Driver: build the grid and RHS, then run the Jacobi iteration. */
int main()
{
  initialize();
  jacobi();
  return 0;
}
|
two_flip_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_TWO_FLIP_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_TWO_FLIP_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
/**
 * Move generator for the "two flip" neighborhood: each candidate move
 * simultaneously sets one binary variable of a flippable pair to 1 and the
 * other to 0 (and the companion move does the opposite).
 */
template <class T_Variable, class T_Expression>
class TwoFlipMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
   public:
    /*************************************************************************/
    TwoFlipMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    virtual ~TwoFlipMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    /**
     * Build the move pool from the given flippable variable pairs and
     * install the updater that recomputes the per-move enable flags.
     * Each pair (x, y) yields two moves: {x:=1, y:=0} and {x:=0, y:=1}.
     */
    void setup(const std::vector<
               std::pair<model_component::Variable<T_Variable, T_Expression> *,
                         model_component::Variable<T_Variable, T_Expression> *>>
                   &a_FLIPPABLE_VARIABLE_PTR_PAIRS) {
        /**
         * Setup move objects.
         */
        const int PAIRS_SIZE = a_FLIPPABLE_VARIABLE_PTR_PAIRS.size();
        this->m_moves.resize(2 * PAIRS_SIZE);
        this->m_flags.resize(2 * PAIRS_SIZE);

        for (auto i = 0; i < PAIRS_SIZE; i++) {
            auto &move = this->m_moves[2 * i];
            move.sense = MoveSense::TwoFlip;
            move.alterations.emplace_back(
                a_FLIPPABLE_VARIABLE_PTR_PAIRS[i].first, 1);
            move.alterations.emplace_back(
                a_FLIPPABLE_VARIABLE_PTR_PAIRS[i].second, 0);
            move.is_univariable_move = false;
            move.is_selection_move   = false;

            // A move is related to every constraint that touches either
            // variable of the pair.
            utility::update_union_set(&(move.related_constraint_ptrs),
                                      a_FLIPPABLE_VARIABLE_PTR_PAIRS[i]
                                          .first->related_constraint_ptrs());

            utility::update_union_set(&(move.related_constraint_ptrs),
                                      a_FLIPPABLE_VARIABLE_PTR_PAIRS[i]
                                          .second->related_constraint_ptrs());

            move.is_special_neighborhood_move = true;
            move.is_available                 = true;
            move.overlap_rate                 = 0.0;

            // The companion move flips both target values.
            this->m_moves[2 * i + 1]                       = move;
            this->m_moves[2 * i + 1].alterations[0].second = 0;
            this->m_moves[2 * i + 1].alterations[1].second = 1;
        }

        /**
         * Setup the move updater, which recomputes a 0/1 flag per move:
         * a move is kept (flag 1) only if it is available, touches no
         * selection or fixed variable, actually changes both variables,
         * and passes the requested improvability filters.
         */
        auto move_updater =                                     //
            [this](auto *                      a_moves_ptr,     //
                   auto *                      a_flags,         //
                   const bool                  a_ACCEPT_ALL,    //
                   const bool                  a_ACCEPT_OBJECTIVE_IMPROVABLE,  //
                   const bool                  a_ACCEPT_FEASIBILITY_IMPROVABLE,  //
                   [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
                const int MOVES_SIZE = a_moves_ptr->size();

#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_flags)[i] = 1;
                    if (!(*a_moves_ptr)[i].is_available) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_selection_variable(
                            (*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    // Reject moves that would re-assign a variable its
                    // current value (no-op in that coordinate).
                    for (const auto &alteration :
                         (*a_moves_ptr)[i].alterations) {
                        if (alteration.first->value() == alteration.second) {
                            (*a_flags)[i] = 0;
                            break;
                        }
                    }

                    if ((*a_flags)[i] == 0) {
                        continue;
                    }

                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }

                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
dtrmm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "dtrmm.fatbin.c"
/* Return the smaller of two sizes. */
static inline size_t min(size_t a, size_t b) {
  if (a < b)
    return a;
  return b;
}
/* Return the larger of two sizes. */
static inline size_t max(size_t a, size_t b) {
  if (a > b)
    return a;
  return b;
}
/* Async host-to-device copy of an m x n sub-matrix of elemSize-byte
 * elements: device A(ai.., aj..) <- host B(bi.., bj..), both column-major
 * with leading dimensions lda/ldb (in elements).
 * The positional CUDA_MEMCPY2D initializer maps to: {srcXInBytes, srcY,
 * srcMemoryType, srcHost, srcDevice, srcArray, srcPitch, dstXInBytes,
 * dstY, dstMemoryType, dstHost, dstDevice, dstArray, dstPitch,
 * WidthInBytes, Height}.  NOTE(review): this depends on the exact field
 * order in cuda.h -- confirm against the installed Driver API header. */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Async device-to-host copy of an m x n sub-matrix of elemSize-byte
 * elements: host A(ai.., aj..) <- device B(bi.., bj..), column-major,
 * leading dimensions in elements.  Same positional CUDA_MEMCPY2D layout
 * as cuMemcpyHtoD2DAsync above, with source/destination memory types
 * swapped.  NOTE(review): tied to the field order in cuda.h -- confirm. */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Async device-to-device copy of an m x n sub-matrix of elemSize-byte
 * elements: device A(ai.., aj..) <- device B(bi.., bj..), column-major,
 * leading dimensions in elements.  Same positional CUDA_MEMCPY2D layout
 * as the helpers above.  NOTE(review): tied to the field order in
 * cuda.h -- confirm against the installed Driver API header. */
static inline CUresult cuMemcpyDtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
static const double zero = 0.0;
static const double one = 1.0;
/* Reference host DTRMM:
 *   B := alpha * op(A) * B   (side == CBlasLeft)
 *   B := alpha * B * op(A)   (side == CBlasRight)
 * where op(A) = A or A', A is an upper or lower triangular matrix (unit
 * or non-unit diagonal) and B is m-by-n.  Column-major storage; B is
 * updated in place, which is why each branch traverses in an order that
 * never reads an element of B after it has been overwritten. */
void dtrmm(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
           size_t m, size_t n,
           double alpha, const double * restrict A, size_t lda,
           double * restrict B, size_t ldb) {
  /* A is nRowA x nRowA */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks, reported via XERBLA with the 1-based argument index. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  if (m == 0 || n == 0)
    return;

  /* alpha == 0: result is simply B := 0. */
  if (alpha == zero) {
#pragma omp parallel for
    for (size_t j = 0; j < n; j++) {
      for (size_t i = 0; i < m; i++)
        B[j * ldb + i] = zero;
    }
    return;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* B := alpha*A*B, A upper: columns of B are independent. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t k = 0; k < m; k++) {
            if (B[j * ldb + k] != zero) {
              register double temp = alpha * B[j * ldb + k];
              for (size_t i = 0; i < k; i++)
                B[j * ldb + i] += temp * A[k * lda + i];
              if (diag == CBlasNonUnit) temp *= A[k * lda + k];
              B[j * ldb + k] = temp;
            }
          }
        }
      }
      else {
        /* B := alpha*A*B, A lower: walk k downward so reads precede writes. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t k = m - 1;
          do {
            if (B[j * ldb + k] != zero) {
              register double temp = alpha * B[j * ldb + k];
              B[j * ldb + k] = temp;
              if (diag == CBlasNonUnit) B[j * ldb + k] *= A[k * lda + k];
              for (size_t i = k + 1; i < m; i++)
                B[j * ldb + i] += temp * A[k * lda + i];
            }
          } while (k-- > 0);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* B := alpha*A'*B, A upper: dot products, i walked downward. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t i = m - 1;
          do {
            register double temp = B[j * ldb + i];
            if (diag == CBlasNonUnit) temp *= A[i * lda + i];
            for (size_t k = 0; k < i; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            B[j * ldb + i] = alpha * temp;
          } while (i-- > 0);
        }
      }
      else {
        /* B := alpha*A'*B, A lower: dot products, i walked upward. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i < m; i++) {
            register double temp = B[j * ldb + i];
            if (diag == CBlasNonUnit) temp *= A[i * lda + i];
            for (size_t k = i + 1; k < m; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            B[j * ldb + i] = alpha * temp;
          }
        }
      }
    }
  }
  else {
    /* side == CBlasRight: columns of B depend on each other, so the
       column loops below are sequential (no omp pragma). */
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* B := alpha*B*A, A upper: j walked downward. */
        size_t j = n - 1;
        do {
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            B[j * ldb + i] *= temp;
          for (size_t k = 0; k < j; k++) {
            if (A[j * lda + k] != zero) {
              register double temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
        } while (j-- > 0);
      }
      else {
        /* B := alpha*B*A, A lower: j walked upward. */
        for (size_t j = 0; j < n; j++) {
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            B[j * ldb + i] *= temp;
          for (size_t k = j + 1; k < n; k++) {
            if (A[j * lda + k] != zero) {
              register double temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* B := alpha*B*A', A upper: k walked upward; scaling B[:,k] last
           (and only when temp != 1) is safe because the update is in place. */
        for (size_t k = 0; k < n; k++) {
          for (size_t j = 0; j < k; j++) {
            if (A[k * lda + j] != zero) {
              register double temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[k * lda + k];
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              B[k * ldb + i] = temp * B[k * ldb + i];
          }
        }
      }
      else {
        /* B := alpha*B*A', A lower: k walked downward. */
        size_t k = n - 1;
        do {
          for (size_t j = k + 1; j < n; j++) {
            if (A[k * lda + j] != zero) {
              register double temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[k * lda + k];
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              B[k * ldb + i] = temp * B[k * ldb + i];
          }
        } while (k-- > 0);
      }
    }
  }
}
/* Out-of-place DTRMM:
 *   X := alpha * op(A) * B   (side == CBlasLeft)
 *   X := alpha * B * op(A)   (side == CBlasRight)
 * where op(A) = A or A', A is an upper or lower triangular matrix (unit
 * or non-unit diagonal), B is m-by-n (read-only) and X is the m-by-n
 * result.  Column-major storage; X must not alias B (both are restrict).
 *
 * BUG FIX: in the side==Right, trans==Trans branches the store
 * X[:,k] = temp * B[:,k] used to be guarded by (temp != one) -- a leftover
 * from the in-place dtrmm above, where skipping a multiply by 1 is a valid
 * shortcut.  Here X is a separate output, so whenever temp == one (e.g.
 * alpha == 1 with a unit diagonal) the column X[:,k] was never initialized
 * and the later "+=" accumulations produced garbage.  The store is now
 * unconditional, which yields identical results when temp != one. */
void dtrmm2(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
            size_t m, size_t n,
            double alpha, const double * restrict A, size_t lda,
            const double * restrict B, size_t ldb,
            double * restrict X, size_t ldx) {
  /* A is nRowA x nRowA */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks, reported via XERBLA with the 1-based argument index. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  else if (ldx < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  if (m == 0 || n == 0)
    return;

  /* alpha == 0: result is simply X := 0. */
  if (alpha == zero) {
#pragma omp parallel for
    for (size_t j = 0; j < n; j++) {
      for (size_t i = 0; i < m; i++)
        X[j * ldx + i] = zero;
    }
    return;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* X := alpha*A*B, A upper: columns are independent. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t k = 0; k < m; k++) {
            register double temp = B[j * ldb + k];
            if (temp != zero) {
              temp *= alpha;
              for (size_t i = 0; i < k; i++)
                X[j * ldx + i] += temp * A[k * lda + i];
              if (diag == CBlasNonUnit) temp *= A[k * lda + k];
            }
            X[j * ldx + k] = temp;  /* always written, even when zero */
          }
        }
      }
      else {
        /* X := alpha*A*B, A lower: k walked downward. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t k = m - 1;
          do {
            if (B[j * ldb + k] != zero) {
              register double temp = alpha * B[j * ldb + k];
              X[j * ldx + k] = temp;
              if (diag == CBlasNonUnit) X[j * ldx + k] *= A[k * lda + k];
              for (size_t i = k + 1; i < m; i++)
                X[j * ldx + i] += temp * A[k * lda + i];
            }
            else
              X[j * ldx + k] = B[j * ldb + k];
          } while (k-- > 0);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* X := alpha*A'*B, A upper: dot products, i walked downward. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t i = m - 1;
          do {
            register double temp = B[j * ldb + i];
            if (diag == CBlasNonUnit) temp *= A[i * lda + i];
            for (size_t k = 0; k < i; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            X[j * ldx + i] = alpha * temp;
          } while (i-- > 0);
        }
      }
      else {
        /* X := alpha*A'*B, A lower: dot products, i walked upward. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i < m; i++) {
            register double temp = B[j * ldb + i];
            if (diag == CBlasNonUnit) temp *= A[i * lda + i];
            for (size_t k = i + 1; k < m; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            X[j * ldx + i] = alpha * temp;
          }
        }
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* X := alpha*B*A, A upper: j walked downward; X[:,j] is assigned
           before any accumulation into it. */
        size_t j = n - 1;
        do {
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            X[j * ldx + i] = temp * B[j * ldb + i];
          for (size_t k = 0; k < j; k++) {
            if (A[j * lda + k] != zero) {
              register double temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
        } while (j-- > 0);
      }
      else {
        /* X := alpha*B*A, A lower: j walked upward. */
        for (size_t j = 0; j < n; j++) {
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            X[j * ldx + i] = temp * B[j * ldb + i];
          for (size_t k = j + 1; k < n; k++) {
            if (A[j * lda + k] != zero) {
              register double temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* X := alpha*B*A', A upper: column X[:,j] is assigned at k == j
           and accumulated into at every k > j. */
        for (size_t k = 0; k < n; k++) {
          for (size_t j = 0; j < k; j++) {
            if (A[k * lda + j] != zero) {
              register double temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[k * lda + k];
          /* Unconditional store (see BUG FIX note above): X[:,k] must be
             initialized even when temp == one. */
          for (size_t i = 0; i < m; i++)
            X[k * ldx + i] = temp * B[k * ldb + i];
        }
      }
      else {
        /* X := alpha*B*A', A lower: k walked downward; X[:,j] for j > k
           was assigned at an earlier (larger) k. */
        size_t k = n - 1;
        do {
          for (size_t j = k + 1; j < n; j++) {
            if (A[k * lda + j] != zero) {
              register double temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
          register double temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[k * lda + k];
          /* Unconditional store (see BUG FIX note above). */
          for (size_t i = 0; i < m; i++)
            X[k * ldx + i] = temp * B[k * ldb + i];
        } while (k-- > 0);
      }
    }
  }
}
/* Single-GPU out-of-place DTRMM: X := alpha*op(A)*B (left) or
 * alpha*B*op(A) (right), all operands device pointers, launched on
 * `stream`.  Selects and launches the matching templated kernel from the
 * embedded fatbin (dtrmm.fatbin.c). */
CUresult cuDtrmm2(CUBLAShandle handle,
                  CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                  size_t m, size_t n,
                  double alpha,
                  CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                  CUdeviceptr X, size_t ldx, CUstream stream) {
  /* A is nRowA x nRowA */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  else if (ldx < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  /* Lazily load the module containing the dtrmm2 kernels on first use. */
  if (handle->dtrmm2 == NULL)
    CU_ERROR_CHECK(cuModuleLoadData(&handle->dtrmm2, imageBytes));

  /* Tile/block sizes; must match the template instantiations baked into
     the fatbin. */
  const unsigned int mb = (side == CBlasRight) ? 64 : (trans == CBlasNoTrans) ? 64 : 32;
  const unsigned int nb = (side == CBlasRight) ?  8 : (trans == CBlasNoTrans) ?  8 : 16;
  const unsigned int kb = (side == CBlasRight) ?  8 : (trans == CBlasNoTrans) ? 16 :  8;
  const unsigned int bx = (side == CBlasRight) ?  8 : (trans == CBlasNoTrans) ? 16 :  8;
  const unsigned int by = (side == CBlasRight) ?  8 : (trans == CBlasNoTrans) ?  4 :  8;

  /* Build the Itanium-mangled name of the required instantiation.
     NOTE(review): relies on the CBlasSide/Uplo/Trans enumerators having
     character values ('L'/'R', 'U'/'L', 'N'/'T') -- confirm in blas.h. */
  char name[67];
  snprintf(name, 67,
           "_Z8dtrmm%c%c%cIL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEvPKdS2_Pddiiiii",
           side, uplo, trans, diag, mb, nb, kb, bx, by);

  CUfunction function;
  CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->dtrmm2, name));

  /* NOTE(review): per the mangled name the kernel takes the trailing five
     parameters as int, but addresses of size_t (8-byte) locals are passed
     here; this works only where the launcher reads 4 bytes little-endian.
     Worth confirming against the kernel signature. */
  void * params[] = { &A, &B, &X, &alpha, &lda, &ldb, &ldx, &m, &n };

  CU_ERROR_CHECK(cuLaunchKernel(function,
                                (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
                                bx, by, 1,
                                0, stream, params, NULL));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}
/* Single-GPU in-place DTRMM: B := alpha*op(A)*B (left) or alpha*B*op(A)
 * (right).  Implemented by running the out-of-place cuDtrmm2 into a
 * temporary pitched buffer X and copying the result back into B. */
CUresult cuDtrmm(CUBLAShandle handle,
                 CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                 size_t m, size_t n,
                 double alpha,
                 CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                 CUstream stream) {
  /* A is nRowA x nRowA */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  /* Temporary result buffer; cuMemAllocPitch returns the pitch in bytes,
     converted here to elements. */
  CUdeviceptr X;
  size_t ldx;
  CU_ERROR_CHECK(cuMemAllocPitch(&X, &ldx, m * sizeof(double), n, sizeof(double)));
  ldx /= sizeof(double);

  CU_ERROR_CHECK(cuDtrmm2(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, X, ldx, stream));

  /* Copy X back over B on the same stream. */
  CU_ERROR_CHECK(cuMemcpyDtoD2DAsync(B, ldb, 0, 0, X, ldx, 0, 0, m, n, sizeof(double), stream));

  /* NOTE(review): X is freed right after enqueuing the async copy; this is
     safe only if cuMemFree synchronizes with outstanding work on X --
     confirm against the CUDA Driver API memory-management semantics. */
  CU_ERROR_CHECK(cuMemFree(X));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}
/* Multi-GPU blocked DTRMM on host-resident matrices:
 * B := alpha*op(A)*B (left) or alpha*B*op(A) (right).  The triangular
 * update is decomposed into large rectangular GEMMs (dispatched to the
 * GPUs via cuMultiGPUDgemm) plus small on-host dtrmm calls on the
 * diagonal blocks.  Traversal directions mirror the host dtrmm so that
 * each block of B is consumed before it is overwritten. */
CUresult cuMultiGPUDtrmm(CUmultiGPUBLAShandle handle,
                         CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                         size_t m, size_t n,
                         double alpha, const double * restrict A, size_t lda,
                         double * restrict B, size_t ldb) {
  /* A is nRowA x nRowA */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  /* alpha == 0: zero B via a degenerate (k == 0, beta == 0) GEMM. */
  if (alpha == zero) {
    dgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
    return CUDA_SUCCESS;
  }

  /* Block sizes; the round-up masks below assume these are powers of two
     (NOTE(review): confirm DGEMM_N_MB / DGEMM_T_MB / DGEMM_N_NB). */
  const size_t mb = (trans == CBlasNoTrans) ? DGEMM_N_MB : DGEMM_T_MB;
  const size_t nb = DGEMM_N_NB;

  /* Small problems: do everything on the host. */
  if (m <= mb || n <= nb) {
    dtrmm(side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
    return CUDA_SUCCESS;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* Upper/NoTrans: process row blocks top-last (i descending). */
        size_t i = (m + mb - 1) & ~(mb - 1);
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
      else {
        /* Lower/NoTrans: row blocks ascending. */
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* Upper/Trans: row blocks ascending. */
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasLeft, CBlasUpper, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
      else {
        /* Lower/Trans: row blocks descending. */
        size_t i = (m + mb - 1) & ~(mb - 1);
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasLeft, CBlasLower, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* Right/Upper/NoTrans: column blocks ascending. */
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
      else {
        /* Right/Lower/NoTrans: column blocks descending. */
        size_t j = (n + nb - 1) & ~(nb - 1);
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* Right/Upper/Trans: column blocks descending. */
        size_t j = (n + nb - 1) & ~(nb - 1);
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasRight, CBlasUpper, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
      else {
        /* Right/Lower/Trans: column blocks ascending. */
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          dtrmm(CBlasRight, CBlasLower, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
    }
  }

  return CUDA_SUCCESS;
}
|
libvoodoo.c | #include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <omp.h>
#include <math.h>
/* c[i] = (a[i] + b[i]) / 100.0 over nelem elements, repeated 100 times,
 * executed on the Xeon Phi coprocessor `device` when offload != 0 (the
 * offload blocks run on the host otherwise).  Work is partitioned
 * statically by hand across the OpenMP threads. */
__attribute__((target(mic)))
void voodoo(double* a, double* b, double* c, int nelem, int device, int offload)
{
    int mthreads;
    /* Query the thread budget where the kernel will actually run (target
       when offloading, host otherwise). */
#pragma offload target(mic:device) if(offload)
    {
        mthreads = omp_get_max_threads();
    }
    /* Static partition: each of nworkers threads gets work_split elements;
       the first work_spill threads take one extra. */
    const int64_t nworkers = nelem > mthreads ? mthreads : 1;
    const int64_t work_split= nelem / nworkers;
    const int64_t work_spill= nelem % nworkers;
    /* NOTE(review): length(0) alloc_if(0) free_if(0) assumes a, b and c
       were already allocated/transferred on the coprocessor by an earlier
       offload -- confirm with the caller. */
#pragma offload \
    target(mic:device) \
    in(a:length(0) alloc_if(0) free_if(0)), \
    in(b:length(0) alloc_if(0) free_if(0)), \
    in(c:length(0) alloc_if(0) free_if(0)) \
    in(work_split) \
    in(work_spill) \
    if(offload)
    {
#pragma omp parallel num_threads(nworkers)
        {
            /* Repeat the kernel 100 times (benchmark-style load). */
            for(int i=0; i<100; ++i) {
                const int tid = omp_get_thread_num();
                int64_t work=0, work_offset=0, work_end=0;
                if (tid < work_spill) {
                    work = work_split + 1;
                    work_offset = tid * work;
                } else {
                    work = work_split;
                    work_offset = tid * work + work_spill;
                }
                work_end = work_offset + work;
                if (work) {
                    for (int eidx=work_offset; eidx<work_end; ++eidx) {
                        c[eidx] = ((a[eidx] + b[eidx]) / 100.0);
                    }
                }
            }
        }
    }
}
|
GPUCommonMath.h | //**************************************************************************\
//* This file is property of and copyright by the ALICE Project *\
//* ALICE Experiment at CERN, All rights reserved. *\
//* *\
//* Primary Authors: Matthias Richter <Matthias.Richter@ift.uib.no> *\
//* for The ALICE HLT Project. *\
//* *\
//* Permission to use, copy, modify and distribute this software and its *\
//* documentation strictly for non-commercial purposes is hereby granted *\
//* without fee, provided that the above copyright notice appears in all *\
//* copies and that both the copyright notice and this permission notice *\
//* appear in the supporting documentation. The authors make no claims *\
//* about the suitability of this software for any purpose. It is *\
//* provided "as is" without express or implied warranty. *\
//**************************************************************************
/// \file GPUCommonMath.h
/// \author David Rohr, Sergey Gorbunov
#ifndef GPUCOMMONMATH_H
#define GPUCOMMONMATH_H
#include "GPUCommonDef.h"
#if defined(__CUDACC__) && !defined(__clang__)
#include <sm_20_atomic_functions.h>
#endif
#if !defined(__OPENCL__)
#include <cmath>
#include <algorithm>
#endif
#if !defined(__OPENCL__) || defined(__OPENCLCPP__)
namespace GPUCA_NAMESPACE
{
namespace gpu
{
#endif
/**
 * Portable math and atomic helpers usable from host code, CUDA/HIP device
 * code and OpenCL kernels alike.  Declarations only; the backend-specific
 * definitions below (and in the per-backend headers) select the proper
 * implementation via the CHOICE macro.
 */
class GPUCommonMath
{
 public:
  GPUhdni() static float2 MakeFloat2(float x, float y); // TODO: Find better appraoch that is constexpr

  // Min/max; the *WithRef variants also select between refX/refY into r.
  template <class T>
  GPUhd() static T Min(const T x, const T y);
  template <class T>
  GPUhd() static T Max(const T x, const T y);
  template <class T, class S>
  GPUhd() static T MinWithRef(T x, T y, S refX, S refY, S& r);
  template <class T, class S>
  GPUhd() static T MaxWithRef(T x, T y, S refX, S refY, S& r);

  // Elementary float math, mapped to the backend's native flavor.
  GPUhdni() static float Sqrt(float x);
  template <class T>
  GPUhd() static T Abs(T x);
  GPUhdni() static float ASin(float x);
  GPUhdni() static float ATan(float x);
  GPUhdni() static float ATan2(float y, float x);
  GPUhdni() static float Sin(float x);
  GPUhdni() static float Cos(float x);
  GPUhdni() static float Tan(float x);
  GPUhdni() static float Copysign(float x, float y);
  GPUhdni() static float TwoPi() { return 6.28319f; }
  GPUhdni() static float Pi() { return 3.1415926535897f; }
  GPUhdni() static int Nint(float x);
  GPUhdni() static bool Finite(float x);
  GPUhdni() static unsigned int Clz(unsigned int val);
  GPUhdni() static unsigned int Popcount(unsigned int val);
  GPUhdni() static float Log(float x);

  // Atomics on global and shared memory; all forward to the private
  // backend-dispatched *Int helpers below.
  GPUdi() static unsigned int AtomicExch(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val) { return GPUCommonMath::AtomicExchInt(addr, val); }
  GPUdi() static unsigned int AtomicAdd(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val) { return GPUCommonMath::AtomicAddInt(addr, val); }
  GPUdi() static void AtomicMax(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val) { GPUCommonMath::AtomicMaxInt(addr, val); }
  GPUdi() static void AtomicMin(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val) { GPUCommonMath::AtomicMinInt(addr, val); }
  GPUdi() static unsigned int AtomicExchShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { return GPUCommonMath::AtomicExchInt(addr, val); }
  GPUdi() static unsigned int AtomicAddShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { return GPUCommonMath::AtomicAddInt(addr, val); }
  GPUdi() static void AtomicMaxShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { GPUCommonMath::AtomicMaxInt(addr, val); }
  GPUdi() static void AtomicMinShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { GPUCommonMath::AtomicMinInt(addr, val); }
  GPUd() static int Mul24(int a, int b);
  GPUd() static float FMulRZ(float a, float b);

 private:
  template <class S, class T>
  GPUd() static unsigned int AtomicExchInt(S* addr, T val);
  template <class S, class T>
  GPUd() static unsigned int AtomicAddInt(S* addr, T val);
  template <class S, class T>
  GPUd() static void AtomicMaxInt(S* addr, T val);
  template <class S, class T>
  GPUd() static void AtomicMinInt(S* addr, T val);
};
// Short alias used throughout the tracking code.
typedef GPUCommonMath CAMath;
// CHOICE(host, cuda_hip, opencl) selects one of three expressions per backend at compile time.
#if defined(GPUCA_GPUCODE_DEVICE) && (defined(__CUDACC__) || defined(__HIPCC__)) // clang-format off
#define CHOICE(c1, c2, c3) (c2) // Select second option for CUDA and HIP
#elif defined(GPUCA_GPUCODE_DEVICE) && defined (__OPENCL__)
#define CHOICE(c1, c2, c3) (c3) // Select third option for OpenCL
#else
#define CHOICE(c1, c2, c3) (c1) //Select first option for Host
#endif // clang-format on
// Construct a float2: aggregate initialization on host/OpenCL, make_float2 on CUDA/HIP
// (where float2 has no aggregate-init support in device code).
GPUhdi() float2 GPUCommonMath::MakeFloat2(float x, float y)
{
#if !defined(GPUCA_GPUCODE) || defined(__OPENCL__) || defined(__OPENCL_HOST__)
float2 ret = {x, y};
return ret;
#else
return make_float2(x, y);
#endif // GPUCA_GPUCODE
}
// Round to the nearest integer; exact halfway cases round toward the even
// integer (banker's rounding), matching ROOT's TMath::Nint convention.
GPUhdi() int GPUCommonMath::Nint(float x)
{
  const float shifted = (x >= 0) ? (x + 0.5f) : (x - 0.5f);
  int result = int(shifted);
  // Exact tie landing on an odd integer: step back toward the even neighbour.
  if (shifted == float(result) && (result & 1)) {
    result += (x >= 0) ? -1 : 1;
  }
  return result;
}
// Finite check is only meaningful on the host; device backends unconditionally return true.
GPUhdi() bool GPUCommonMath::Finite(float x) { return CHOICE(std::isfinite(x), true, true); }
// Trig wrappers: float variants on host and CUDA/HIP, OpenCL builtins (overloaded) otherwise.
GPUhdi() float GPUCommonMath::ATan(float x) { return CHOICE(atanf(x), atanf(x), atan(x)); }
GPUhdi() float GPUCommonMath::ATan2(float y, float x) { return CHOICE(atan2f(y, x), atan2f(y, x), atan2(y, x)); }
GPUhdi() float GPUCommonMath::Sin(float x) { return CHOICE(sinf(x), sinf(x), sin(x)); }
GPUhdi() float GPUCommonMath::Cos(float x) { return CHOICE(cosf(x), cosf(x), cos(x)); }
GPUhdi() float GPUCommonMath::Tan(float x) { return CHOICE(tanf(x), tanf(x), tan(x)); }
// Count leading zero bits of x; returns 32 for x == 0.
GPUhdi() unsigned int GPUCommonMath::Clz(unsigned int x)
{
#if (defined(__GNUC__) || defined(__clang__) || defined(__CUDACC__) || defined(__HIPCC__)) && (!defined(__OPENCL__) || defined(__OPENCLCPP__))
  return x == 0 ? 32 : CHOICE(__builtin_clz(x), __clz(x), __builtin_clz(x)); // use builtin if available
#else
  // Portable fallback: walk a mask down from the most significant bit.
  unsigned int leading = 0;
  for (unsigned int mask = 0x80000000u; mask != 0; mask >>= 1) {
    if (x & mask) {
      return leading;
    }
    leading++;
  }
  return 32; // no bit set
#endif
}
// Count the number of set bits in x.
GPUhdi() unsigned int GPUCommonMath::Popcount(unsigned int x)
{
#if (defined(__GNUC__) || defined(__clang__) || defined(__CUDACC__) || defined(__HIPCC__)) && (!defined(__OPENCL__) /*|| defined(__OPENCLCPP__)*/) // TODO: remove OPENCLCPP workaround when reported SPIR-V bug is fixed
  return CHOICE(__builtin_popcount(x), __popc(x), __builtin_popcount(x)); // use builtin if available
#else
  // Portable fallback (Kernighan): each iteration clears the lowest set bit,
  // so the loop runs exactly popcount(x) times.
  unsigned int count = 0;
  while (x != 0) {
    x &= x - 1;
    count++;
  }
  return count;
#endif
}
// Min/Max: std:: versions on host and CUDA/HIP, manual ternary for OpenCL
// (which lacks std::min/std::max in device code).
template <class T>
GPUhdi() T GPUCommonMath::Min(const T x, const T y)
{
return CHOICE(std::min(x, y), std::min(x, y), (x < y ? x : y));
}
template <class T>
GPUhdi() T GPUCommonMath::Max(const T x, const T y)
{
return CHOICE(std::max(x, y), std::max(x, y), (x > y ? x : y));
}
// Return the smaller of x/y and store the matching tag (refX/refY) in r.
// On ties y wins, exactly as in the branch-based original.
template <class T, class S>
GPUhdi() T GPUCommonMath::MinWithRef(T x, T y, S refX, S refY, S& r)
{
  const bool takeX = x < y;
  r = takeX ? refX : refY;
  return takeX ? x : y;
}
// Return the larger of x/y and store the matching tag (refX/refY) in r.
// On ties y wins, exactly as in the branch-based original.
template <class T, class S>
GPUhdi() T GPUCommonMath::MaxWithRef(T x, T y, S refX, S refY, S& r)
{
  const bool takeX = x > y;
  r = takeX ? refX : refY;
  return takeX ? x : y;
}
GPUhdi() float GPUCommonMath::Sqrt(float x) { return CHOICE(sqrtf(x), sqrtf(x), sqrt(x)); }
// Abs specializations: per-type absolute value dispatched through CHOICE.
template <>
GPUhdi() float GPUCommonMath::Abs<float>(float x)
{
return CHOICE(fabsf(x), fabsf(x), fabs(x));
}
// double Abs only where doubles exist (OpenCL requires the cl_khr_fp64 extension).
#if !defined(__OPENCL__) || defined(cl_khr_fp64)
template <>
GPUhdi() double GPUCommonMath::Abs<double>(double x)
{
return CHOICE(fabs(x), fabs(x), fabs(x));
}
#endif
template <>
GPUhdi() int GPUCommonMath::Abs<int>(int x)
{
return CHOICE(abs(x), abs(x), abs(x));
}
GPUhdi() float GPUCommonMath::ASin(float x) { return CHOICE(asinf(x), asinf(x), asin(x)); }
GPUhdi() float GPUCommonMath::Log(float x) { return CHOICE(logf(x), logf(x), log(x)); }
// Return a value with the magnitude of x and the sign of y.
GPUhdi() float GPUCommonMath::Copysign(float x, float y)
{
#if defined(__OPENCLCPP__)
return copysign(x, y);
#elif defined(GPUCA_GPUCODE) && !defined(__OPENCL__)
return copysignf(x, y);
#elif defined(__cplusplus) && __cplusplus >= 201103L
return std::copysignf(x, y);
#else
// NOTE(review): this fallback treats y == -0.0f as positive, unlike the
// library copysign which would propagate the sign bit — confirm callers
// never depend on negative-zero behavior.
x = GPUCommonMath::Abs(x);
return (y >= 0) ? x : -x;
#endif // GPUCA_GPUCODE
}
#ifndef GPUCA_GPUCODE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-value" // GCC BUG in omp atomic capture gives false warning
#endif
// Atomically exchange *addr with val, returning the previous value.
// Backend dispatch: OpenCL-C++ C11 atomics, OpenCL 1.x builtins, CUDA/HIP
// atomicExch, and an OpenMP atomic-capture fallback on the host.
template <class S, class T>
GPUdi() unsigned int GPUCommonMath::AtomicExchInt(S* addr, T val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
return ::atomic_exchange(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
return ::atomic_xchg(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
return ::atomicExch(addr, val);
#else
unsigned int old;
#ifdef WITH_OPENMP
#pragma omp atomic capture
#endif
{
old = *addr;
*addr = val;
}
return old;
#endif // GPUCA_GPUCODE
}
// Atomically add val to *addr, returning the previous value.
// Same backend dispatch scheme as AtomicExchInt above.
template <class S, class T>
GPUdi() unsigned int GPUCommonMath::AtomicAddInt(S* addr, T val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
return ::atomic_fetch_add(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
return ::atomic_add(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
return ::atomicAdd(addr, val);
#else
unsigned int old;
#ifdef WITH_OPENMP
#pragma omp atomic capture
#endif
{
old = *addr;
*addr += val;
}
return old;
#endif // GPUCA_GPUCODE
}
// Atomically raise *addr to at least val (no return value).
template <class S, class T>
GPUdi() void GPUCommonMath::AtomicMaxInt(S* addr, T val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
::atomic_fetch_max(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
::atomic_max(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
::atomicMax(addr, val);
#else
#ifdef WITH_OPENMP
// NOTE(review): this check-then-exchange loop is not a true atomic max —
// between the comparison and the exchange another thread's larger value can
// be overwritten and lost. Confirm whether the host path tolerates this.
while (*addr < val)
AtomicExch(addr, val);
#else
if (*addr < val)
*addr = val;
#endif
#endif // GPUCA_GPUCODE
}
// Atomically lower *addr to at most val (no return value).
template <class S, class T>
GPUdi() void GPUCommonMath::AtomicMinInt(S* addr, T val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
::atomic_fetch_min(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
::atomic_min(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
::atomicMin(addr, val);
#else
#ifdef WITH_OPENMP
// NOTE(review): same lost-update caveat as AtomicMaxInt's OpenMP fallback.
while (*addr > val)
AtomicExch(addr, val);
#else
if (*addr > val)
*addr = val;
#endif
#endif // GPUCA_GPUCODE
}
#ifndef GPUCA_GPUCODE
#pragma GCC diagnostic pop
#endif
#undef CHOICE
#if !defined(__OPENCL__) || defined(__OPENCLCPP__)
}
}
#endif
#endif // GPUCOMMONMATH_H
|
atomic_messages.c | // RUN: %clang_cc1 -verify -fopenmp=libiomp5 -ferror-limit 100 %s
// clang -verify test: a compound statement after '#pragma omp atomic' must be
// rejected, and labels inside/outside the atomic region must be invisible to
// goto across the region boundary. (Comments here deliberately avoid the
// verifier's directive keywords; line-relative directives below are untouched.)
int foo() {
L1:
foo();
#pragma omp atomic
// expected-error@+1 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
{
foo();
goto L1; // expected-error {{use of undeclared label 'L1'}}
}
goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp atomic
// expected-error@+1 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
{
foo();
L2:
foo();
}
return 0;
}
// Non-scalar type used below to trigger the scalar-type diagnostics.
struct S {
int a;
};
// clang -verify test: every malformed statement under 'atomic read' (empty
// statement, call, compound assignment, non-lvalue source) must be diagnosed;
// 'a = b;' is the single accepted form; duplicate clauses are rejected.
int readint() {
int a = 0, b = 0;
// Test for atomic read
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
foo();
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
a += b;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected lvalue expression}}
a = 0;
#pragma omp atomic read
a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
a = b;
return 0;
}
// clang -verify test: 'atomic read' on struct (non-scalar) operands is diagnosed.
int readS() {
struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
a = b;
return a.a;
}
// clang -verify test: mirrors readint() for the 'atomic write' form; plain
// assignments from constants or variables are accepted, everything else and
// duplicate clauses are diagnosed.
int writeint() {
int a = 0, b = 0;
// Test for atomic write
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
foo();
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
a += b;
#pragma omp atomic write
a = 0;
#pragma omp atomic write
a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
a = b;
return 0;
}
// clang -verify test: 'atomic write' on struct (non-scalar) operands is diagnosed.
int writeS() {
struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
a = b;
return a.a;
}
|
7_data-env1.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
// Data-environment demo: x is shared by default inside the parallel region,
// so all 20 threads print the same value (100).
int main(int argc, char** argv) {
  const int x = 100;
  omp_set_num_threads(20);
#pragma omp parallel
  printf("Sou a thread %d, meu valor de x é %d\n", omp_get_thread_num(), x);
  return 0;
}
nowait-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This example is extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
Some threads may finish the for loop early and execute error = a[9]+1
while another thread may still be simultaneously executing
the for worksharing region by writing to a[9], causing data races.
Data race pair: a[i]@72:7 vs. a[9]@75:13.
*/
#include <stdio.h>
// DataRaceBench kernel — the data race below is INTENTIONAL (this file is a
// positive test case); do not "fix" it.
int main()
{
int i,error;
int len = 1000;
int a[1000], b=5;
// Sequential initialization.
for (i=0; i<len; i++)
a[i]= i;
#pragma omp parallel shared(b, error)
{
// 'nowait' removes the implicit barrier after the worksharing loop, so one
// thread may enter the single region and read a[9] while another thread is
// still writing a[9] — that read/write pair is the seeded race.
#pragma omp for nowait
for(i = 0; i < len; i++)
a[i] = b + a[i]*5;
#pragma omp single
error = a[9] + 1;
}
printf ("error = %d\n", error);
return 0;
}
|
bitset.h | /*
* Copyright (c) 2016 drali. All rights reserved.
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the author be held liable for any damages arising from the use of this software.
* Permission is granted to anyone to use this software for any purpose, including commercial
* applications, and to alter it and redistribute it freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
* 2. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
* 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
*/
#pragma once
#include <cstring> // for memcpy
#include <ostream>
#include <omp.h>
// Fixed-size bit set with OpenMP-parallel bulk operations.
//
// Fixes vs. the previous revision:
//  - UnSet() used `&= (0 << n)`, which cleared the ENTIRE byte instead of one bit.
//  - Copy/move assignment leaked the previously owned buffer.
//  - `numthreads` was uninitialized for default-constructed, copied and moved
//    instances (read later by the parallel member functions).
//  - Copying an empty BitSet memcpy'd 1 byte from a null pointer.
class BitSet {
 public:
  // Empty bit set: no storage allocated.
  BitSet() = default;

  // Sets the thread count used by the OpenMP-parallel member functions.
  void SetNumThreads(int num) { numthreads = num; }

  // Allocates room for `size` bits, all cleared.
  BitSet(const size_t size) {
    allocated_ = (size >> 3) + 1;
    bits_ = new unsigned char[allocated_];
    size_ = size;
    size_t i;
#pragma omp parallel for num_threads(numthreads) private(i)
    for (i = 0; i < allocated_; i++)
      bits_[i] = 0;
  }

  // Raw pointer to the underlying byte buffer (owned by this BitSet).
  unsigned char* Get() { return bits_; }

  ~BitSet() {
    delete[] bits_; // delete[] on nullptr is a no-op
    bits_ = nullptr;
  }

  BitSet(const BitSet& rhs) : allocated_(rhs.allocated_), size_(rhs.size_), numthreads(rhs.numthreads) {
    if (allocated_ != 0) {
      bits_ = new unsigned char[allocated_];
      memcpy(bits_, rhs.bits_, (size_ >> 3) + 1);
    }
  }

  // Copy assignment: releases the old buffer before adopting the new contents.
  BitSet& operator=(const BitSet& rhs) {
    if (this != &rhs) {
      delete[] bits_;
      bits_ = nullptr;
      allocated_ = rhs.allocated_;
      size_ = rhs.size_;
      numthreads = rhs.numthreads;
      if (allocated_ != 0) {
        bits_ = new unsigned char[allocated_];
        memcpy(bits_, rhs.bits_, (size_ >> 3) + 1);
      }
    }
    return *this;
  }

  BitSet(BitSet&& rhs) {
    if (this == &rhs) { return; }
    allocated_ = rhs.allocated_;
    bits_ = rhs.bits_;
    size_ = rhs.size_;
    numthreads = rhs.numthreads;
    rhs.allocated_ = 0;
    rhs.bits_ = nullptr;
    rhs.size_ = 0;
  }

  // Move assignment: releases the old buffer before stealing rhs's.
  BitSet& operator=(BitSet&& rhs) {
    if (this == &rhs) { return *this; }
    delete[] bits_;
    allocated_ = rhs.allocated_;
    bits_ = rhs.bits_;
    size_ = rhs.size_;
    numthreads = rhs.numthreads;
    rhs.allocated_ = 0;
    rhs.bits_ = nullptr;
    rhs.size_ = 0;
    return *this;
  }

  // Atomically sets the bit at `position`.
  void Set(const size_t position) {
#pragma omp atomic
    bits_[position >> 3] |= (1 << (position & 0x7));
  }

  // Atomically clears the bit at `position` (only that bit; other bits in the
  // byte are preserved).
  void UnSet(const size_t position) {
#pragma omp atomic
    bits_[position >> 3] &= (unsigned char)~(1u << (position & 0x7));
  }

  // Clears all buckets in parallel.
  void ReSet() {
    size_t i;
#pragma omp parallel for num_threads(numthreads) private(i)
    for (i = 0; i < allocated_; i++)
      bits_[i] = 0;
  }

  // Clears all buckets serially.
  void Reset() {
    for (size_t i = 0; i < allocated_; i++)
      bits_[i] = 0;
  }

  // True when byte bucket `i` has any bit set.
  // NOTE(review): this takes a BYTE index, not a bit index — confirm callers.
  bool Test(const size_t i) {
    return bits_[i] != 0;
  }

  // True when any bit is set. Uses an OpenMP reduction so threads no longer
  // race on the shared flag (result is unchanged).
  bool Any() {
    bool ret = false;
    size_t i;
#pragma omp parallel for num_threads(numthreads) private(i) reduction(||:ret)
    for (i = 0; i < allocated_; i++)
      if (bits_[i]) ret = true;
    return ret;
  }

  // Number of bits this set can hold.
  size_t GetSize() const { return size_; }

  // Number of bytes allocated.
  size_t GetBuckets() const { return allocated_; }

  // Number of set bits.
  size_t Count() const {
    size_t ret = 0;
    size_t i;
#pragma omp parallel for num_threads(numthreads) private(i) reduction(+:ret)
    for (i = 0; i < size_; i++)
      ret += (IsSet(i) ? 1 : 0);
    return ret;
  }

  // True when the bit at `position` is set.
  bool IsSet(const size_t position) const {
    return (bits_[position >> 3] & (1 << (position & 0x7))) != 0;
  }

  friend std::ostream& operator<<(std::ostream& out, const BitSet& bit_set) {
    for (size_t i = 0; i < bit_set.size_; ++i) {
      out << bit_set.IsSet(i);
    }
    out << std::endl;
    return out;
  }

 private:
  unsigned char* bits_ = nullptr; // bit storage, 8 positions per byte
  size_t allocated_ = 0;          // bytes allocated
  size_t size_ = 0;               // number of bits
  int numthreads = 20;            // default matches the sized constructor's old value
};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.